Jan 05 21:51:07 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 05 21:51:07 crc restorecon[4746]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 05 21:51:07 crc restorecon[4746]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 05 21:51:07 crc 
restorecon[4746]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 05 21:51:07 crc 
restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:07 crc restorecon[4746]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:07 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 
21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 05 21:51:08 crc 
restorecon[4746]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 
21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 
21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc 
restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 05 21:51:08 crc restorecon[4746]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 05 21:51:08 crc restorecon[4746]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Jan 05 21:51:08 crc kubenswrapper[4910]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 05 21:51:08 crc kubenswrapper[4910]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Jan 05 21:51:08 crc kubenswrapper[4910]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 05 21:51:08 crc kubenswrapper[4910]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Jan 05 21:51:08 crc kubenswrapper[4910]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 05 21:51:08 crc kubenswrapper[4910]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.538880 4910 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541897 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541921 4910 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541927 4910 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541931 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541935 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541938 4910 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541943 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541947 4910 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541951 4910 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541955 4910 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541960 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541971 4910 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541975 4910 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541980 4910 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541984 4910 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541989 4910 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541994 4910 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.541998 4910 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542002 4910 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542005 4910 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542010 4910 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542016 4910 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542020 4910 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542024 4910 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542027 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542031 4910 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542034 4910 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542038 4910 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542042 4910 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542045 4910 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542050 4910 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542055 4910 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542059 4910 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542063 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542067 4910 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542071 4910 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542074 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542078 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542082 4910 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542086 4910 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542089 4910 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542093 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542096 4910 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542100 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542104 4910 feature_gate.go:330] unrecognized feature gate: Example
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542108 4910 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542113 4910 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542119 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542140 4910 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542145 4910 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542148 4910 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542152 4910 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542155 4910 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542160 4910 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542164 4910 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542168 4910 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542171 4910 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542175 4910 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542178 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542182 4910 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542186 4910 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542189 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542193 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542196 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542199 4910 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542203 4910 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542208 4910 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542213 4910 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542216 4910 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542220 4910 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.542223 4910 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542527 4910 flags.go:64] FLAG: --address="0.0.0.0"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542542 4910 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542563 4910 flags.go:64] FLAG: --anonymous-auth="true"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542575 4910 flags.go:64] FLAG: --application-metrics-count-limit="100"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542582 4910 flags.go:64] FLAG: --authentication-token-webhook="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542587 4910 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542594 4910 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542605 4910 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542611 4910 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542620 4910 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542626 4910 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542633 4910 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542639 4910 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542644 4910 flags.go:64] FLAG: --cgroup-root=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542649 4910 flags.go:64] FLAG: --cgroups-per-qos="true"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542654 4910 flags.go:64] FLAG: --client-ca-file=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542659 4910 flags.go:64] FLAG: --cloud-config=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542664 4910 flags.go:64] FLAG: --cloud-provider=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542668 4910 flags.go:64] FLAG: --cluster-dns="[]"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542675 4910 flags.go:64] FLAG: --cluster-domain=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542680 4910 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542685 4910 flags.go:64] FLAG: --config-dir=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542689 4910 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542695 4910 flags.go:64] FLAG: --container-log-max-files="5"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542702 4910 flags.go:64] FLAG: --container-log-max-size="10Mi"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542709 4910 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542714 4910 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542720 4910 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542724 4910 flags.go:64] FLAG: --contention-profiling="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542729 4910 flags.go:64] FLAG: --cpu-cfs-quota="true"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542734 4910 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542739 4910 flags.go:64] FLAG: --cpu-manager-policy="none"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542744 4910 flags.go:64] FLAG: --cpu-manager-policy-options=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542751 4910 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542756 4910 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542760 4910 flags.go:64] FLAG: --enable-debugging-handlers="true"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542766 4910 flags.go:64] FLAG: --enable-load-reader="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542770 4910 flags.go:64] FLAG: --enable-server="true"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542777 4910 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542784 4910 flags.go:64] FLAG: --event-burst="100"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542790 4910 flags.go:64] FLAG: --event-qps="50"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542795 4910 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542800 4910 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542807 4910 flags.go:64] FLAG: --eviction-hard=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542813 4910 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542818 4910 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542823 4910 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542828 4910 flags.go:64] FLAG: --eviction-soft=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542833 4910 flags.go:64] FLAG: --eviction-soft-grace-period=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542838 4910 flags.go:64] FLAG: --exit-on-lock-contention="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542843 4910 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542848 4910 flags.go:64] FLAG: --experimental-mounter-path=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542853 4910 flags.go:64] FLAG: --fail-cgroupv1="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542857 4910 flags.go:64] FLAG: --fail-swap-on="true"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542861 4910 flags.go:64] FLAG: --feature-gates=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542866 4910 flags.go:64] FLAG: --file-check-frequency="20s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542870 4910 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542875 4910 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542879 4910 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542884 4910 flags.go:64] FLAG: --healthz-port="10248"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542888 4910 flags.go:64] FLAG: --help="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542894 4910 flags.go:64] FLAG: --hostname-override=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542899 4910 flags.go:64] FLAG: --housekeeping-interval="10s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542903 4910 flags.go:64] FLAG: --http-check-frequency="20s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542908 4910 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542913 4910 flags.go:64] FLAG: --image-credential-provider-config=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542917 4910 flags.go:64] FLAG: --image-gc-high-threshold="85"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542923 4910 flags.go:64] FLAG: --image-gc-low-threshold="80"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542928 4910 flags.go:64] FLAG: --image-service-endpoint=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542933 4910 flags.go:64] FLAG: --kernel-memcg-notification="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542937 4910 flags.go:64] FLAG: --kube-api-burst="100"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542951 4910 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542956 4910 flags.go:64] FLAG: --kube-api-qps="50"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542963 4910 flags.go:64] FLAG: --kube-reserved=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542968 4910 flags.go:64] FLAG: --kube-reserved-cgroup=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542973 4910 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542978 4910 flags.go:64] FLAG: --kubelet-cgroups=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542982 4910 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542987 4910 flags.go:64] FLAG: --lock-file=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542992 4910 flags.go:64] FLAG: --log-cadvisor-usage="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.542996 4910 flags.go:64] FLAG: --log-flush-frequency="5s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543000 4910 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543007 4910 flags.go:64] FLAG: --log-json-split-stream="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543011 4910 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543016 4910 flags.go:64] FLAG: --log-text-split-stream="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543020 4910 flags.go:64] FLAG: --logging-format="text"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543024 4910 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543029 4910 flags.go:64] FLAG: --make-iptables-util-chains="true"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543033 4910 flags.go:64] FLAG: --manifest-url=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543037 4910 flags.go:64] FLAG: --manifest-url-header=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543043 4910 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543047 4910 flags.go:64] FLAG: --max-open-files="1000000"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543052 4910 flags.go:64] FLAG: --max-pods="110"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543056 4910 flags.go:64] FLAG: --maximum-dead-containers="-1"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543060 4910 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543064 4910 flags.go:64] FLAG: --memory-manager-policy="None"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543068 4910 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543073 4910 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543078 4910 flags.go:64] FLAG: --node-ip="192.168.126.11"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543082 4910 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543093 4910 flags.go:64] FLAG: --node-status-max-images="50"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543097 4910 flags.go:64] FLAG: --node-status-update-frequency="10s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543101 4910 flags.go:64] FLAG: --oom-score-adj="-999"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543105 4910 flags.go:64] FLAG: --pod-cidr=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543109 4910 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543123 4910 flags.go:64] FLAG: --pod-manifest-path=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543140 4910 flags.go:64] FLAG: --pod-max-pids="-1"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543144 4910 flags.go:64] FLAG: --pods-per-core="0"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543149 4910 flags.go:64] FLAG: --port="10250"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543153 4910 flags.go:64] FLAG: --protect-kernel-defaults="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543157 4910 flags.go:64] FLAG: --provider-id=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543161 4910 flags.go:64] FLAG: --qos-reserved=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543165 4910 flags.go:64] FLAG: --read-only-port="10255"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543169 4910 flags.go:64] FLAG: --register-node="true"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543174 4910 flags.go:64] FLAG: --register-schedulable="true"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543179 4910 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543186 4910 flags.go:64] FLAG: --registry-burst="10"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543190 4910 flags.go:64] FLAG: --registry-qps="5"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543194 4910 flags.go:64] FLAG: --reserved-cpus=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543199 4910 flags.go:64] FLAG: --reserved-memory=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543204 4910 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543209 4910 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543213 4910 flags.go:64] FLAG: --rotate-certificates="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543217 4910 flags.go:64] FLAG: --rotate-server-certificates="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543222 4910 flags.go:64] FLAG: --runonce="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543226 4910 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543230 4910 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543235 4910 flags.go:64] FLAG: --seccomp-default="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543238 4910 flags.go:64] FLAG: --serialize-image-pulls="true"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543243 4910 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543247 4910 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543251 4910 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543256 4910 flags.go:64] FLAG: --storage-driver-password="root"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543265 4910 flags.go:64] FLAG: --storage-driver-secure="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543270 4910 flags.go:64] FLAG: --storage-driver-table="stats"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543274 4910 flags.go:64] FLAG: --storage-driver-user="root"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543279 4910 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543285 4910 flags.go:64] FLAG: --sync-frequency="1m0s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543291 4910 flags.go:64] FLAG: --system-cgroups=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543296 4910 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543304 4910 flags.go:64] FLAG: --system-reserved-cgroup=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543309 4910 flags.go:64] FLAG: --tls-cert-file=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543313 4910 flags.go:64] FLAG: --tls-cipher-suites="[]"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543320 4910 flags.go:64] FLAG: --tls-min-version=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543324 4910 flags.go:64] FLAG: --tls-private-key-file=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543328 4910 flags.go:64] FLAG: --topology-manager-policy="none"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543332 4910 flags.go:64] FLAG: --topology-manager-policy-options=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543337 4910 flags.go:64] FLAG: --topology-manager-scope="container"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543342 4910 flags.go:64] FLAG: --v="2"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543348 4910 flags.go:64] FLAG: --version="false"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543355 4910 flags.go:64] FLAG: --vmodule=""
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543361 4910 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543365 4910 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543477 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543481 4910 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543486 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543490 4910 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543494 4910 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543502 4910 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543505 4910 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543509 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543513 4910 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543516 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543521 4910 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543526 4910 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543529 4910 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543533 4910 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543537 4910 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543541 4910 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543545 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543554 4910 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543557 4910 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543562 4910 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543567 4910 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543570 4910 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543575 4910 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543579 4910 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543582 4910 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543586 4910 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543590 4910 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543594 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543598 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543601 4910 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543604 4910 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543608 4910 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543611 4910 feature_gate.go:330] unrecognized feature gate: Example
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543615 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543619 4910 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543622 4910 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543626 4910 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543629 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543633 4910 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543636 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543640 4910 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543643 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543647 4910 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543651 4910 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543656 4910 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543659 4910 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543663 4910 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543667 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543671 4910 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543678 4910 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543681 4910 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543685 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543689 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543692 4910 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543696 4910 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543700 4910 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543703 4910 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543707 4910 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543712 4910 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543716 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543719 4910 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543723 4910 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543727 4910 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543732 4910 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543736 4910 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543739 4910 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543743 4910 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543747 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543750 4910 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543754 4910 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.543757 4910 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.543769 4910 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.554974 4910 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.555020 4910 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555216 4910 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555232 4910 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555242 4910 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555250 4910 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555258 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555266 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555274 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555283 4910 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555292 4910 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555300 4910 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555308 4910 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555315 4910 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555323 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555331 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555340 4910 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555349 4910 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555356 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555364 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555373 4910 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555382 4910 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555390 4910 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555399 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555407 4910 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555419 4910 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555430 4910 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555439 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555447 4910 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555459 4910 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555472 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555480 4910 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555489 4910 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555498 4910 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555508 4910 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555518 4910 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555530 4910 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555552 4910 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555563 4910 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555571 4910 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555580 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555590 4910 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555598 4910 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555606 4910 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555614 4910 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555622 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555630 4910 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555638 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555646 4910 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555657 4910 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555667 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555676 4910 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555684 4910 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555692 4910 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555699 4910 feature_gate.go:330] unrecognized feature gate: Example
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555708 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555715 4910 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555734 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555743 4910 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555752 4910 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555760 4910 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555769 4910 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555777 4910 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555786 4910 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555794 4910 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555802 4910 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555810 4910 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555829 4910 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555837 4910 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555845 4910 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555853 4910 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555860 4910 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.555870 4910 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.555885 4910 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556192 4910 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556209 4910 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556218 4910 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556227 4910 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556238 4910 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556250 4910 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556259 4910 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556268 4910 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556277 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556287 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556296 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556304 4910 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556313 4910 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556321 4910 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556329 4910 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556336 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556344 4910 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556352 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556360 4910 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556370 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556379 4910 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556386 4910 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556395 4910 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556402 4910 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556410 4910 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556419 4910 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556426 4910 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556434 4910 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556443 4910 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556451 4910 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556459 4910 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556466 4910 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556475 4910 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556482 4910 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556492 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556500 4910 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556510 4910 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556520 4910 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556529 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556538 4910 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556547 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556556 4910 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556567 4910 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556576 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556584 4910 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556592 4910 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556600 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556608 4910 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556616 4910 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556624 4910 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556632 4910 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556640 4910 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556650 4910 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556660 4910 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556673 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556682 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556692 4910 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556700 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556708 4910 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556716 4910 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556724 4910 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556732 4910 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556741 4910 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556748 4910 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556756 4910 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556764 4910 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556772 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556780 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556788 4910 feature_gate.go:330] unrecognized feature gate: Example
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556795 4910 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.556804 4910 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.556817 4910 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.557463 4910 server.go:940] "Client rotation is on, will bootstrap in background"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.562823 4910 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.562971 4910 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.563811 4910 server.go:997] "Starting client certificate rotation"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.563861 4910 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.564027 4910 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-16 07:01:10.772475308 +0000 UTC
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.564171 4910 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 249h10m2.208307209s for next certificate rotation
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.573832 4910 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.577899 4910 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.588041 4910 log.go:25] "Validated CRI v1 runtime API"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.606845 4910 log.go:25] "Validated CRI v1 image API"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.609251 4910 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.612584 4910 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-05-21-40-34-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.612636 4910 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.627053 4910 manager.go:217] Machine: {Timestamp:2026-01-05 21:51:08.625435932 +0000 UTC m=+0.202933622 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:13985a1a-3617-450c-bc3b-e969b1c68d1d BootID:a68cccef-4498-48d1-bd1d-f77912a8fbc0 Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:4e:d6:4e Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:4e:d6:4e Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:ef:21:6d Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:7b:98:5b Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:fe:12:15 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:7c:03:54 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:f7:a2:6f Speed:-1 Mtu:1496} {Name:eth10 MacAddress:42:4e:0b:79:bf:e6 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:a2:8e:87:84:da:fa Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.627507 4910 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.627817 4910 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.628989 4910 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.629240 4910 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.629283 4910 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.629544 4910 topology_manager.go:138] "Creating topology manager with none policy"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.629560 4910 container_manager_linux.go:303] "Creating device plugin manager"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.629787 4910 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.629832 4910 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.630096 4910 state_mem.go:36] "Initialized new in-memory state store"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.630703 4910 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.631477 4910 kubelet.go:418] "Attempting to sync node with API server"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.631528 4910 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.631564 4910 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.631581 4910 kubelet.go:324] "Adding apiserver pod source"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.631597 4910 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.634316 4910 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.636589 4910 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.637751 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.166:6443: connect: connection refused
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.637880 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.166:6443: connect: connection refused
Jan 05 21:51:08 crc kubenswrapper[4910]: E0105 21:51:08.637932 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.166:6443: connect: connection refused" logger="UnhandledError"
Jan 05 21:51:08 crc kubenswrapper[4910]: E0105 21:51:08.637996 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.166:6443: connect: connection refused" logger="UnhandledError"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.638186 4910 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.639567 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.639619 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.639635 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.639649 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.639672 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.639688 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.639703 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.639726 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.639742 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.639758 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.639805 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.639822 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.640084 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.640796 4910 server.go:1280] "Started kubelet"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.641029 4910 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.166:6443: connect: connection refused
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.641359 4910 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.641719 4910 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.642300 4910 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.643247 4910 server.go:460] "Adding debug handlers to kubelet server"
Jan 05 21:51:08 crc kubenswrapper[4910]: E0105 21:51:08.643036 4910 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.166:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.1887f430ab99238a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-05 21:51:08.640756618 +0000 UTC m=+0.218254298,LastTimestamp:2026-01-05 21:51:08.640756618 +0000 UTC m=+0.218254298,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 05 21:51:08 crc systemd[1]: Started Kubernetes Kubelet.
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.645691 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.645832 4910 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.646192 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 06:08:57.109872161 +0000 UTC Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.646393 4910 volume_manager.go:287] "The desired_state_of_world populator starts" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.646408 4910 volume_manager.go:289] "Starting Kubelet Volume Manager" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.646513 4910 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.647185 4910 factory.go:55] Registering systemd factory Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.647215 4910 factory.go:221] Registration of the systemd container factory successfully Jan 05 21:51:08 crc kubenswrapper[4910]: E0105 21:51:08.647411 4910 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 05 21:51:08 crc kubenswrapper[4910]: E0105 21:51:08.647747 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused" interval="200ms" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.647841 4910 factory.go:153] Registering CRI-O factory Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.647861 4910 factory.go:221] Registration of the crio container factory successfully Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.647952 4910 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.647985 4910 factory.go:103] Registering Raw factory Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.647949 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.166:6443: connect: connection refused Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.648013 4910 manager.go:1196] Started watching for new ooms in manager Jan 05 21:51:08 crc kubenswrapper[4910]: E0105 21:51:08.650934 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.166:6443: connect: connection refused" logger="UnhandledError" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.652582 4910 manager.go:319] Starting recovery of all containers Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656561 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" 
volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656612 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656625 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656635 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656647 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656657 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656668 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656677 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656688 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656698 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656708 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656720 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656730 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656742 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656751 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656762 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656773 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656785 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656797 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656808 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656819 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656831 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656843 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656881 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656894 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656905 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656921 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656933 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656947 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656957 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656968 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656986 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.656997 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657008 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657020 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657030 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657040 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657050 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657061 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657070 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657079 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657089 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657099 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657111 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657142 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" 
volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657156 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657170 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657181 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657194 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657207 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657218 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657228 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657242 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657253 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657264 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657274 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" 
volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657283 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657295 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.657304 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.662314 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.664031 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.664844 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.666497 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.666708 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.666828 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.666955 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.667082 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.667813 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.667967 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.668100 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.668270 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.668441 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.668578 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.668797 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.668887 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.668975 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.669014 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.669065 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" 
volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.669106 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.669201 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.669367 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.669512 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.669627 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.669758 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.669941 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.669972 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.670008 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.670035 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.670061 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.671930 4910 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.671971 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.671994 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672013 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672071 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672089 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672104 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672140 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672159 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672175 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672193 4910 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672210 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672227 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672244 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672262 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672276 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672314 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672339 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672364 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672385 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672403 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672423 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672441 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672500 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672539 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672557 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672575 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672592 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672613 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672628 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672645 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672661 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672678 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" 
volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672694 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672736 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672776 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672793 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672810 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672827 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672843 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672862 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672877 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672893 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672909 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" 
volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672927 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672942 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672960 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672978 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.672996 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673014 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673033 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673050 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673067 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673084 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673161 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673177 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673196 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673233 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673250 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673267 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673283 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673299 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673315 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673332 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673350 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673365 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673383 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673405 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673449 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673466 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673482 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673497 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673516 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673534 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673550 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673568 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673586 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673603 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673618 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673636 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673655 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673672 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673689 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673705 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673721 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673738 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673756 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673772 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673790 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673804 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673822 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673839 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673855 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673872 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673921 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673939 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673957 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673974 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.673991 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674010 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674028 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674044 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674062 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674079 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674098 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674114 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674176 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674193 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674210 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674227 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674243 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674261 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674276 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674293 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674311 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674327 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674344 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674360 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674377 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674391 4910 reconstruct.go:97] "Volume reconstruction finished" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.674400 4910 reconciler.go:26] "Reconciler: start to sync state" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.688227 4910 manager.go:324] Recovery completed Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.698718 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:08 crc kubenswrapper[4910]: 
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.701474 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.701497 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.703045 4910 cpu_manager.go:225] "Starting CPU manager" policy="none"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.703086 4910 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.703235 4910 state_mem.go:36] "Initialized new in-memory state store"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.715855 4910 policy_none.go:49] "None policy: Start"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.715915 4910 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.718846 4910 memory_manager.go:170] "Starting memorymanager" policy="None"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.718891 4910 state_mem.go:35] "Initializing new in-memory state store"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.720189 4910 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.720231 4910 status_manager.go:217] "Starting to sync pod status with apiserver"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.720255 4910 kubelet.go:2335] "Starting kubelet main sync loop"
Jan 05 21:51:08 crc kubenswrapper[4910]: E0105 21:51:08.720304 4910 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.721101 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.166:6443: connect: connection refused
Jan 05 21:51:08 crc kubenswrapper[4910]: E0105 21:51:08.721213 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.166:6443: connect: connection refused" logger="UnhandledError"
Jan 05 21:51:08 crc kubenswrapper[4910]: E0105 21:51:08.748345 4910 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.787610 4910 manager.go:334] "Starting Device Plugin manager"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.787688 4910 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.787718 4910 server.go:79] "Starting device plugin registration server"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.788168 4910 eviction_manager.go:189] "Eviction manager: starting control loop"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.788181 4910 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.788528 4910 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.788612 4910 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.788619 4910 plugin_manager.go:118] "Starting Kubelet Plugin Manager"
Jan 05 21:51:08 crc kubenswrapper[4910]: E0105 21:51:08.794929 4910 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.820905 4910 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.821040 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.822454 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.822505 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.822523 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.822704 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.822953 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.823069 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.823734 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.823780 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.823800 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.823955 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.824303 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.824386 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.824630 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.824663 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.824673 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.824917 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.824936 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.824946 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.825265 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.825511 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.825594 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.826451 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.826474 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.826485 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.826858 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.826885 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.826901 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.827712 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.827886 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.828027 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.828345 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.828537 4910 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.828590 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.829794 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.829818 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.829824 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.829858 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.829873 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.829828 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.830154 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.830187 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.831025 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.831050 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.831060 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:08 crc kubenswrapper[4910]: E0105 21:51:08.851523 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused" interval="400ms" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.876771 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.876806 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.876894 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.876943 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.876974 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.876994 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.877010 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.877032 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.877051 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.877080 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.877100 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.877153 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" 
(UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.877200 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.877247 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.877371 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: W0105 21:51:08.878510 4910 helpers.go:245] readString: Failed to read "/sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/cpuset.cpus.effective": read /sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/cpuset.cpus.effective: no such device Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.889040 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.890508 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.890563 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.890583 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.890622 4910 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 05 21:51:08 crc kubenswrapper[4910]: E0105 21:51:08.891253 4910 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.166:6443: connect: connection refused" node="crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978407 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978464 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978489 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978510 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978529 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978549 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978567 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978586 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978607 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978602 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978631 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978616 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:08 crc 
kubenswrapper[4910]: I0105 21:51:08.978658 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978664 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978602 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978737 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978747 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978748 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978766 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978765 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978787 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978765 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:08 crc kubenswrapper[4910]: 
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978791 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978601 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978835 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978838 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978870 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.978965 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 05 21:51:08 crc kubenswrapper[4910]: I0105 21:51:08.979048 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.091485 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.093461 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.093525 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.093540 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.093615 4910 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 05 21:51:09 crc kubenswrapper[4910]: E0105 21:51:09.094320 4910 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.166:6443: connect: connection refused" node="crc"
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.160754 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.175196 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Jan 05 21:51:09 crc kubenswrapper[4910]: W0105 21:51:09.192512 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-9250f3ea7a24a5d6552cbbe57e8eda993aec6e84d2573b2bffee67865b5d35b5 WatchSource:0}: Error finding container 9250f3ea7a24a5d6552cbbe57e8eda993aec6e84d2573b2bffee67865b5d35b5: Status 404 returned error can't find the container with id 9250f3ea7a24a5d6552cbbe57e8eda993aec6e84d2573b2bffee67865b5d35b5
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.193740 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:51:09 crc kubenswrapper[4910]: W0105 21:51:09.194824 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-73bf90367381e4aedc8afca06a602d7020d30554b7ed5b42df8435b9b152623f WatchSource:0}: Error finding container 73bf90367381e4aedc8afca06a602d7020d30554b7ed5b42df8435b9b152623f: Status 404 returned error can't find the container with id 73bf90367381e4aedc8afca06a602d7020d30554b7ed5b42df8435b9b152623f
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.208594 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 05 21:51:09 crc kubenswrapper[4910]: W0105 21:51:09.210286 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-9a3c286293a7412389c1454cb2aad79517cd38f1f32854b16ab4b7499edb9505 WatchSource:0}: Error finding container 9a3c286293a7412389c1454cb2aad79517cd38f1f32854b16ab4b7499edb9505: Status 404 returned error can't find the container with id 9a3c286293a7412389c1454cb2aad79517cd38f1f32854b16ab4b7499edb9505
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.216543 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 05 21:51:09 crc kubenswrapper[4910]: W0105 21:51:09.230749 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-df765de74f7d1cf64fdf7c608f104b2564628cc4b6645e2444825f47fa87aebf WatchSource:0}: Error finding container df765de74f7d1cf64fdf7c608f104b2564628cc4b6645e2444825f47fa87aebf: Status 404 returned error can't find the container with id df765de74f7d1cf64fdf7c608f104b2564628cc4b6645e2444825f47fa87aebf
Jan 05 21:51:09 crc kubenswrapper[4910]: W0105 21:51:09.239373 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-97ea3effcda62ced6b7faed4a3d60421de7d508d6e2f756a77231c1e887075bc WatchSource:0}: Error finding container 97ea3effcda62ced6b7faed4a3d60421de7d508d6e2f756a77231c1e887075bc: Status 404 returned error can't find the container with id 97ea3effcda62ced6b7faed4a3d60421de7d508d6e2f756a77231c1e887075bc
Jan 05 21:51:09 crc kubenswrapper[4910]: E0105 21:51:09.252336 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused" interval="800ms"
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.495268 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.497195 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.497259 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.497279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.497319 4910 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 05 21:51:09 crc kubenswrapper[4910]: E0105 21:51:09.497837 4910 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.166:6443: connect: connection refused" node="crc"
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.642741 4910 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.166:6443: connect: connection refused
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.646850 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 13:24:24.270281444 +0000 UTC
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.726490 4910 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e" exitCode=0
Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.726587 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e"} Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.726706 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"97ea3effcda62ced6b7faed4a3d60421de7d508d6e2f756a77231c1e887075bc"} Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.726821 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.728205 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.728238 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.728250 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.729216 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e"} Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.729239 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"df765de74f7d1cf64fdf7c608f104b2564628cc4b6645e2444825f47fa87aebf"} Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.730677 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5" exitCode=0 Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.730731 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5"} Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.730749 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9a3c286293a7412389c1454cb2aad79517cd38f1f32854b16ab4b7499edb9505"} Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.730830 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.732539 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.732566 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.732577 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.733881 4910 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" 
containerID="e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb" exitCode=0 Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.734135 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.734077 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb"} Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.734292 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"73bf90367381e4aedc8afca06a602d7020d30554b7ed5b42df8435b9b152623f"} Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.734626 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.735464 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.735565 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.735655 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.736852 4910 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c" exitCode=0 Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.736937 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c"} Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.736999 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"9250f3ea7a24a5d6552cbbe57e8eda993aec6e84d2573b2bffee67865b5d35b5"} Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.737102 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.737687 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.737755 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.737878 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.737984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.738016 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:09 crc kubenswrapper[4910]: I0105 21:51:09.738030 4910 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:09 crc kubenswrapper[4910]: W0105 21:51:09.902550 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.166:6443: connect: connection refused Jan 05 21:51:09 crc kubenswrapper[4910]: E0105 21:51:09.902651 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.166:6443: connect: connection refused" logger="UnhandledError" Jan 05 21:51:10 crc kubenswrapper[4910]: W0105 21:51:10.044695 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.166:6443: connect: connection refused Jan 05 21:51:10 crc kubenswrapper[4910]: E0105 21:51:10.044813 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.166:6443: connect: connection refused" logger="UnhandledError" Jan 05 21:51:10 crc kubenswrapper[4910]: E0105 21:51:10.054298 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused" interval="1.6s" Jan 05 21:51:10 crc kubenswrapper[4910]: W0105 21:51:10.072248 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.166:6443: connect: connection refused Jan 05 21:51:10 crc kubenswrapper[4910]: E0105 21:51:10.072380 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.166:6443: connect: connection refused" logger="UnhandledError" Jan 05 21:51:10 crc kubenswrapper[4910]: W0105 21:51:10.219303 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.166:6443: connect: connection refused Jan 05 21:51:10 crc kubenswrapper[4910]: E0105 21:51:10.219397 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.166:6443: connect: connection refused" logger="UnhandledError" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.298603 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:10 crc 
kubenswrapper[4910]: I0105 21:51:10.300274 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.300864 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.300877 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.300914 4910 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 05 21:51:10 crc kubenswrapper[4910]: E0105 21:51:10.301275 4910 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.166:6443: connect: connection refused" node="crc" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.647833 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 17:58:21.128277942 +0000 UTC Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.746910 4910 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe" exitCode=0 Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.746979 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe"} Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.747146 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.749013 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.749056 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.749069 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.750636 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"88a32f17e02a9d35c306705ff1ac0f65b2d02d2a7f376412f37632608dbc2711"} Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.750774 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.751818 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.751849 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.751863 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.755092 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9"} Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.755118 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a"} Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.755148 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec"} Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.755215 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.756716 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.756739 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.756748 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.758633 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.758727 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923"} Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.758778 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17"} Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.758807 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872"} Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.759633 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.759677 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.759687 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.763443 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572"} Jan 05 
21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.763475 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e"} Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.763495 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045"} Jan 05 21:51:10 crc kubenswrapper[4910]: I0105 21:51:10.763509 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824"} Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.648638 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 08:44:06.645537475 +0000 UTC Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.771727 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6"} Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.771872 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.773549 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.773603 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.773616 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.775490 4910 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9" exitCode=0 Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.775574 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9"} Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.775625 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.775817 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.777047 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.777073 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.777085 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.777220 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.777266 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.777283 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.902211 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.903976 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.904031 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.904052 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:11 crc kubenswrapper[4910]: I0105 21:51:11.904091 4910 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 05 21:51:12 crc kubenswrapper[4910]: I0105 21:51:12.649779 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 07:31:11.85221679 +0000 UTC Jan 05 21:51:12 crc kubenswrapper[4910]: I0105 21:51:12.786502 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a"} Jan 05 21:51:12 crc kubenswrapper[4910]: I0105 21:51:12.786572 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1"} Jan 05 21:51:12 crc kubenswrapper[4910]: I0105 21:51:12.786591 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b"} Jan 05 21:51:12 crc kubenswrapper[4910]: I0105 21:51:12.786605 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea"} Jan 05 21:51:12 crc kubenswrapper[4910]: I0105 21:51:12.786609 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 05 21:51:12 crc kubenswrapper[4910]: I0105 21:51:12.786687 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:12 crc kubenswrapper[4910]: I0105 21:51:12.788089 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:12 crc kubenswrapper[4910]: I0105 21:51:12.788177 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:12 crc kubenswrapper[4910]: I0105 21:51:12.788198 4910 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:13 crc kubenswrapper[4910]: I0105 21:51:13.588270 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:13 crc kubenswrapper[4910]: I0105 21:51:13.650934 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 11:10:02.461832744 +0000 UTC Jan 05 21:51:13 crc kubenswrapper[4910]: I0105 21:51:13.796006 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a"} Jan 05 21:51:13 crc kubenswrapper[4910]: I0105 21:51:13.796110 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 05 21:51:13 crc kubenswrapper[4910]: I0105 21:51:13.796910 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:13 crc kubenswrapper[4910]: I0105 21:51:13.797459 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:13 crc kubenswrapper[4910]: I0105 21:51:13.799078 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:13 crc kubenswrapper[4910]: I0105 21:51:13.799158 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:13 crc kubenswrapper[4910]: I0105 21:51:13.799196 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:13 crc kubenswrapper[4910]: I0105 21:51:13.799202 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:13 crc kubenswrapper[4910]: I0105 21:51:13.799258 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:13 crc kubenswrapper[4910]: I0105 21:51:13.799231 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.651644 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 04:32:02.143992169 +0000 UTC Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.799625 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.801063 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.801149 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.801171 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.835972 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.836357 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume 
controller attach/detach" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.838259 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.838319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.838334 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.886836 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.887016 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.888581 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.888645 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.888669 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:14 crc kubenswrapper[4910]: I0105 21:51:14.915441 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 05 21:51:15 crc kubenswrapper[4910]: I0105 21:51:15.413203 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:15 crc kubenswrapper[4910]: I0105 21:51:15.652483 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 09:14:16.392061078 +0000 UTC Jan 05 21:51:15 crc kubenswrapper[4910]: I0105 21:51:15.652585 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 275h23m0.739483956s for next certificate rotation Jan 05 21:51:15 crc kubenswrapper[4910]: I0105 21:51:15.803029 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:15 crc kubenswrapper[4910]: I0105 21:51:15.803029 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:15 crc kubenswrapper[4910]: I0105 21:51:15.804807 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:15 crc kubenswrapper[4910]: I0105 21:51:15.804869 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:15 crc kubenswrapper[4910]: I0105 21:51:15.804890 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:15 crc kubenswrapper[4910]: I0105 21:51:15.805248 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:15 crc kubenswrapper[4910]: I0105 21:51:15.805296 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:15 crc kubenswrapper[4910]: I0105 21:51:15.805316 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 
05 21:51:16 crc kubenswrapper[4910]: I0105 21:51:16.025889 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:51:16 crc kubenswrapper[4910]: I0105 21:51:16.026286 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:16 crc kubenswrapper[4910]: I0105 21:51:16.027881 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:16 crc kubenswrapper[4910]: I0105 21:51:16.027978 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:16 crc kubenswrapper[4910]: I0105 21:51:16.028005 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:16 crc kubenswrapper[4910]: I0105 21:51:16.297736 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:51:16 crc kubenswrapper[4910]: I0105 21:51:16.465957 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:51:16 crc kubenswrapper[4910]: I0105 21:51:16.474778 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:51:16 crc kubenswrapper[4910]: I0105 21:51:16.806173 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:16 crc kubenswrapper[4910]: I0105 21:51:16.807601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:16 crc kubenswrapper[4910]: I0105 21:51:16.807727 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:16 crc kubenswrapper[4910]: I0105 21:51:16.807748 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:17 crc kubenswrapper[4910]: I0105 21:51:17.809040 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:17 crc kubenswrapper[4910]: I0105 21:51:17.810030 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:17 crc kubenswrapper[4910]: I0105 21:51:17.810067 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:17 crc kubenswrapper[4910]: I0105 21:51:17.810079 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:18 crc kubenswrapper[4910]: E0105 21:51:18.795186 4910 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 05 21:51:19 crc kubenswrapper[4910]: I0105 21:51:19.972560 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:51:19 crc kubenswrapper[4910]: I0105 21:51:19.972919 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:19 crc kubenswrapper[4910]: I0105 21:51:19.979113 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:19 crc kubenswrapper[4910]: I0105 21:51:19.979226 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:19 crc kubenswrapper[4910]: I0105 21:51:19.979253 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:19 crc kubenswrapper[4910]: I0105 21:51:19.982384 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:51:20 crc kubenswrapper[4910]: E0105 21:51:20.463410 4910 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": net/http: TLS handshake timeout" event="&Event{ObjectMeta:{crc.1887f430ab99238a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-05 21:51:08.640756618 +0000 UTC m=+0.218254298,LastTimestamp:2026-01-05 21:51:08.640756618 +0000 UTC m=+0.218254298,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 05 21:51:20 crc kubenswrapper[4910]: I0105 21:51:20.643901 4910 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 05 21:51:20 crc kubenswrapper[4910]: I0105 21:51:20.817308 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:20 crc kubenswrapper[4910]: I0105 21:51:20.818379 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:20 crc kubenswrapper[4910]: I0105 21:51:20.818426 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:20 crc kubenswrapper[4910]: I0105 21:51:20.818440 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:20 crc kubenswrapper[4910]: I0105 21:51:20.830139 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 05 21:51:20 crc kubenswrapper[4910]: I0105 21:51:20.830295 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:20 crc kubenswrapper[4910]: I0105 21:51:20.831487 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:20 crc kubenswrapper[4910]: I0105 21:51:20.831528 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:20 crc kubenswrapper[4910]: I0105 21:51:20.831543 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:21 crc kubenswrapper[4910]: I0105 21:51:21.614540 4910 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" 
start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 05 21:51:21 crc kubenswrapper[4910]: I0105 21:51:21.614634 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 05 21:51:21 crc kubenswrapper[4910]: I0105 21:51:21.628395 4910 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 05 21:51:21 crc kubenswrapper[4910]: I0105 21:51:21.628524 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 05 21:51:22 crc kubenswrapper[4910]: I0105 21:51:22.972849 4910 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 05 21:51:22 crc kubenswrapper[4910]: I0105 21:51:22.972939 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 05 21:51:25 crc kubenswrapper[4910]: I0105 21:51:25.421272 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:25 crc kubenswrapper[4910]: I0105 21:51:25.421588 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:25 crc kubenswrapper[4910]: I0105 21:51:25.423397 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:25 crc kubenswrapper[4910]: I0105 21:51:25.423447 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:25 crc kubenswrapper[4910]: I0105 21:51:25.423460 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:25 crc kubenswrapper[4910]: I0105 21:51:25.426843 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:25 crc kubenswrapper[4910]: I0105 21:51:25.834442 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 05 21:51:25 crc kubenswrapper[4910]: I0105 21:51:25.834527 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:25 crc 
kubenswrapper[4910]: I0105 21:51:25.840087 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:25 crc kubenswrapper[4910]: I0105 21:51:25.840689 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:25 crc kubenswrapper[4910]: I0105 21:51:25.840711 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.636857 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="3.2s" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.637554 4910 trace.go:236] Trace[130866715]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (05-Jan-2026 21:51:13.001) (total time: 13636ms): Jan 05 21:51:26 crc kubenswrapper[4910]: Trace[130866715]: ---"Objects listed" error: 13636ms (21:51:26.637) Jan 05 21:51:26 crc kubenswrapper[4910]: Trace[130866715]: [13.636171422s] [13.636171422s] END Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.637595 4910 trace.go:236] Trace[1478899164]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (05-Jan-2026 21:51:11.942) (total time: 14694ms): Jan 05 21:51:26 crc kubenswrapper[4910]: Trace[1478899164]: ---"Objects listed" error: 14693ms (21:51:26.635) Jan 05 21:51:26 crc kubenswrapper[4910]: Trace[1478899164]: [14.694018853s] [14.694018853s] END Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.637653 4910 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.637600 4910 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.638207 4910 trace.go:236] Trace[1671040409]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (05-Jan-2026 21:51:13.037) (total time: 13600ms): Jan 05 21:51:26 crc kubenswrapper[4910]: Trace[1671040409]: ---"Objects listed" error: 13600ms (21:51:26.637) Jan 05 21:51:26 crc kubenswrapper[4910]: Trace[1671040409]: [13.600392174s] [13.600392174s] END Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.638259 4910 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.639966 4910 trace.go:236] Trace[593502362]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (05-Jan-2026 21:51:12.068) (total time: 14571ms): Jan 05 21:51:26 crc kubenswrapper[4910]: Trace[593502362]: ---"Objects listed" error: 14571ms (21:51:26.639) Jan 05 21:51:26 crc kubenswrapper[4910]: Trace[593502362]: [14.571251023s] [14.571251023s] END Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.640009 4910 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.640849 4910 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.642098 4910 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" 
node="crc" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.646501 4910 apiserver.go:52] "Watching apiserver" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.651972 4910 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.652535 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"] Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.653244 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.653382 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.653751 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.653868 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.653888 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.653968 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.654005 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.654152 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.654898 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.659417 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.660446 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.661733 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.662016 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.662059 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.662164 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.662291 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.662396 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.666114 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.683066 4910 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:46048->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.683195 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:46048->192.168.126.11:17697: read: connection reset by peer" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.683555 4910 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:46062->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.683646 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:46062->192.168.126.11:17697: read: connection reset by peer" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.684246 4910 patch_prober.go:28] interesting pod/kube-apiserver-crc 
container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.684483 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.708621 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.747904 4910 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.766153 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.778687 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.796224 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.815962 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.832182 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.839775 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.842545 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6" exitCode=255 Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.842610 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6"} Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.847399 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.847491 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.847847 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.847886 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.847919 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.847946 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.847971 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.847994 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848021 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848050 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848091 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848114 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848157 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod 
\"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848178 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848196 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848216 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848236 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848261 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848286 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848312 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848340 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848191 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848379 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848401 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848519 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848691 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848751 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848833 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848873 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848860 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849061 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849102 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849102 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849142 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.848366 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849256 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849262 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849296 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849327 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849341 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849354 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849373 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849394 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849406 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849426 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849512 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849550 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849577 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849580 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849601 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849605 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849627 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849677 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849703 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849730 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849755 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849779 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849803 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849828 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849851 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849875 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: 
\"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849898 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849921 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849928 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849947 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849962 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.849978 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850010 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850035 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850059 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850075 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850087 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850113 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850158 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850174 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850182 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850259 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850265 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850313 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850331 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850361 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850393 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850422 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850437 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850449 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850482 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850511 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850537 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850568 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850581 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850577 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850602 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850640 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850641 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850669 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850665 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850684 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850704 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850753 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850803 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850833 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850868 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850906 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850936 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850964 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850991 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851021 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: 
\"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851050 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851083 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851109 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851154 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851182 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851217 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851244 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851269 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851293 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851323 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851351 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851382 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851407 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851433 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851456 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851482 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851506 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851554 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851587 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851615 4910 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851639 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851665 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851692 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851718 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851746 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851769 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851793 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851828 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851854 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851878 4910 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851902 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851929 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851973 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851996 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852019 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852040 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852066 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852091 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852137 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 05 21:51:26 
crc kubenswrapper[4910]: I0105 21:51:26.852159 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852180 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852204 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852226 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852254 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852279 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852304 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852328 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852351 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852375 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: 
\"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852396 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852438 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852460 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852480 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852502 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852524 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852546 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852570 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852595 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852617 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: 
\"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852644 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852667 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852691 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852760 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852787 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852816 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852856 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852962 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852987 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.853013 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.853040 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.853068 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.853091 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.853114 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.853164 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.853190 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.853214 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.853237 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.853260 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.853282 4910 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.853305 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.853326 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855367 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855464 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855516 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855599 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855638 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855674 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855709 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855749 4910 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855791 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855839 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855876 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855921 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855955 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.855982 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856020 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856046 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856072 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 05 21:51:26 crc 
kubenswrapper[4910]: I0105 21:51:26.856096 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856144 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856168 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856190 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856213 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856238 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856262 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856291 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856313 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856338 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 
21:51:26.856360 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856382 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856407 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856429 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856455 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856478 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856501 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856530 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856557 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856580 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 
21:51:26.856602 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856626 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856650 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856675 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856737 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856771 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856802 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856830 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856860 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856893 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856922 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856951 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856974 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857003 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857034 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857057 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857085 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857109 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857206 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857223 4910 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857237 4910 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857254 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857268 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857283 4910 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857296 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857310 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857323 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857340 4910 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857355 4910 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857369 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857383 
4910 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857397 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857411 4910 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857425 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857440 4910 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857456 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857471 4910 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857485 4910 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857498 4910 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857510 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857522 4910 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857538 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857551 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857564 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857577 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857591 4910 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857604 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857618 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857632 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857646 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857660 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850770 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.850975 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851015 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851247 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857853 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851366 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851402 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851468 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.851513 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852746 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852738 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.852644 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856111 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856319 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856465 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856711 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.856898 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857194 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857189 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857159 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.857842 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.858273 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.858694 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.858347 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.858744 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.858743 4910 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.858904 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.858935 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.859109 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.859202 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.859337 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.859400 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.859683 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.860559 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.860665 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.860882 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.860984 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.861229 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.861469 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.861576 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.861611 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.861631 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.861674 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.867246 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.861788 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.862082 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.862334 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.862430 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.862519 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.862557 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.862896 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.863376 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.863512 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.863752 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.863946 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.864087 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.864511 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.864498 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.864499 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.864521 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.864658 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.864858 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.864934 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.864977 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.865488 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.866907 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.865702 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.864307 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.867936 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:27.367900038 +0000 UTC m=+18.945397718 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.868029 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:27.368015521 +0000 UTC m=+18.945513461 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.868448 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.868717 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.869232 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.869524 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.869550 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.869899 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.870314 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.870427 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:51:27.37040594 +0000 UTC m=+18.947903830 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.871327 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.873661 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.874325 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.874715 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.875101 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.875735 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.876084 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.879266 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.880163 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.888851 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.896048 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.895801 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.896534 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.896849 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.897069 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.897255 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.897325 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.897433 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.897612 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.897809 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.897835 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.897849 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.898040 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.898147 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.899214 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.899350 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.899385 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.899697 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.899859 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.899939 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.900560 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.900586 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.901015 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.901192 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.901214 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.901228 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.901311 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:27.401285714 +0000 UTC m=+18.978783384 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.901484 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.904381 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.904529 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.904589 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.905418 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.905595 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.905883 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.905948 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.906191 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.906322 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.906349 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.906644 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.906740 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.906789 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.906801 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.906812 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.906873 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.906726 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.906981 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: E0105 21:51:26.907154 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:27.406969955 +0000 UTC m=+18.984467665 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.907220 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.907485 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.907501 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.908279 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.909065 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.912508 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.912537 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.912834 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.913815 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.914319 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7".
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.914682 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.914739 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.914836 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.914951 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.915015 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.915046 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.915958 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.922099 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.922861 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.923313 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.924394 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.924479 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.924694 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.924934 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.925190 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.925245 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.925343 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.925442 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.925972 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.926396 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.926258 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.926854 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.926882 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.926963 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.926993 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.927056 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.927053 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.927183 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.928861 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.929136 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.929311 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.929420 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.930570 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.930597 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.930727 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.938382 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.940945 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.946504 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.950078 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.953725 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.958884 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959010 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959128 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959127 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959365 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959398 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959413 4910 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959431 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959447 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959461 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959475 4910 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") 
on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959489 4910 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959503 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959517 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959531 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959545 4910 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959557 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959569 4910 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959582 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959598 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959611 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959624 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959636 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959649 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959662 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959699 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959711 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959724 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959736 4910 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959750 4910 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959763 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959777 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959789 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959802 4910 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959814 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959827 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959840 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: 
\"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959854 4910 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959866 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959879 4910 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959891 4910 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959903 4910 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959915 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959927 4910 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959942 4910 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959954 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959967 4910 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959981 4910 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.959991 4910 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960003 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: 
\"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960016 4910 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960027 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960039 4910 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960051 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960062 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960074 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960089 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960101 4910 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960114 4910 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960146 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960158 4910 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960171 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960184 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" 
(UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960196 4910 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960208 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960220 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960233 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960245 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960257 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960269 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960282 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960296 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960308 4910 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960323 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960336 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960348 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960359 4910 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960374 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960389 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960400 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960412 4910 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960425 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960436 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960449 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960460 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960473 4910 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960485 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960499 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960510 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: 
\"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960523 4910 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960536 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960549 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960561 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960573 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960584 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960596 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960608 4910 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960620 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960631 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960645 4910 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960657 4910 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960669 4910 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960682 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960695 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960706 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960718 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960730 4910 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960744 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960755 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960768 4910 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960781 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960795 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960807 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960819 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960830 4910 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960842 4910 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960855 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960867 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960879 4910 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960894 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960907 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960919 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960930 4910 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960942 4910 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960953 4910 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960965 4910 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960977 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.960989 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961002 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961014 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961025 4910 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961037 4910 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961048 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961061 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961073 4910 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961086 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961098 4910 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961111 4910 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961142 4910 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961155 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961166 4910 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961180 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961193 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961205 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961216 4910 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961228 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961241 4910 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961287 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961716 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961796 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961808 4910 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961820 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961832 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961847 4910 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961858 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961869 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961881 4910 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961893 4910 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961903 4910 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961915 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961926 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961927 4910 scope.go:117] "RemoveContainer" containerID="9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961936 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.962010 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.962023 4910 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.961817 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.972105 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.981132 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.983091 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.987439 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:51:26 crc kubenswrapper[4910]: I0105 21:51:26.998381 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.003753 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 05 21:51:27 crc kubenswrapper[4910]: W0105 21:51:27.019636 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-e6186f81acf98382c937824a4d4f3b9c959cd465cbf11426ecd78f5fda372b9e WatchSource:0}: Error finding container e6186f81acf98382c937824a4d4f3b9c959cd465cbf11426ecd78f5fda372b9e: Status 404 returned error can't find the container with id e6186f81acf98382c937824a4d4f3b9c959cd465cbf11426ecd78f5fda372b9e Jan 05 21:51:27 crc kubenswrapper[4910]: W0105 21:51:27.020602 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-4e30543f106a22c65f27ca5a6a4f1a486b5517c745b94542db32b13899327a38 WatchSource:0}: Error finding container 4e30543f106a22c65f27ca5a6a4f1a486b5517c745b94542db32b13899327a38: Status 404 returned error can't find the container with id 4e30543f106a22c65f27ca5a6a4f1a486b5517c745b94542db32b13899327a38 Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.062989 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.063043 4910 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.467080 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.467264 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:27 crc kubenswrapper[4910]: E0105 21:51:27.467351 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:51:28.46730697 +0000 UTC m=+20.044804650 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.467446 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:27 crc kubenswrapper[4910]: E0105 21:51:27.467513 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:51:27 crc kubenswrapper[4910]: E0105 21:51:27.467565 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:51:27 crc kubenswrapper[4910]: E0105 21:51:27.467585 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:27 crc kubenswrapper[4910]: E0105 21:51:27.467591 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:51:27 crc kubenswrapper[4910]: E0105 21:51:27.467652 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:51:27 crc kubenswrapper[4910]: E0105 21:51:27.467673 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:51:27 crc kubenswrapper[4910]: E0105 21:51:27.467688 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.467524 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:27 crc kubenswrapper[4910]: E0105 21:51:27.467654 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2026-01-05 21:51:28.467628808 +0000 UTC m=+20.045126478 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:27 crc kubenswrapper[4910]: E0105 21:51:27.467756 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:28.467741301 +0000 UTC m=+20.045238991 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:51:27 crc kubenswrapper[4910]: E0105 21:51:27.467773 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:28.467764771 +0000 UTC m=+20.045262461 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.467801 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:27 crc kubenswrapper[4910]: E0105 21:51:27.467889 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:51:27 crc kubenswrapper[4910]: E0105 21:51:27.467929 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:28.467919975 +0000 UTC m=+20.045417655 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.848091 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"e6186f81acf98382c937824a4d4f3b9c959cd465cbf11426ecd78f5fda372b9e"} Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.850823 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1"} Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.850878 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"4e4255b411fdd22eaecd18574c198f050762e27555893e0f5a3e16ad54760b13"} Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.853433 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.855729 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199"} Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.855946 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.857806 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969"} Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.857904 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09"} Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.857925 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"4e30543f106a22c65f27ca5a6a4f1a486b5517c745b94542db32b13899327a38"} Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.882890 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.903586 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.926216 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05
T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.943799 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.964950 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:27 crc kubenswrapper[4910]: I0105 21:51:27.991588 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.004382 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.026189 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.040431 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.061672 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.086049 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.100876 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.118362 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.132734 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.476864 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.476928 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.476959 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.476981 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.477066 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:51:30.477030529 +0000 UTC m=+22.054528199 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.477091 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.477133 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.477147 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.477142 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.477186 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.477162 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.477224 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:30.477206113 +0000 UTC m=+22.054703783 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.477227 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.477245 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.477245 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:30.477237104 +0000 UTC m=+22.054734764 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.477277 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.477292 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:30.477271845 +0000 UTC m=+22.054769505 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.477316 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:30.477304916 +0000 UTC m=+22.054802586 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.720514 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.720692 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.720815 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.720699 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.720915 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:28 crc kubenswrapper[4910]: E0105 21:51:28.720912 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.725608 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.726680 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.728525 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.729447 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.730695 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.731287 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.731890 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.732845 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.733822 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.735179 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.736215 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.737689 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.738412 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.738748 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.739811 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.740892 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.744182 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.745429 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.747264 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.748433 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.749580 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.751786 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.751761 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.752989 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.755262 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.756605 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.757443 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.759717 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.761954 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.762953 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.765804 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.766153 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.766778 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.767364 4910 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.767585 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.769078 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.770616 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.771267 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.772822 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.773848 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.774393 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.775463 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.776088 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.776909 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.777517 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.778644 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.779340 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.779856 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.780183 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.780796 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" 
path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.781779 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.782704 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.783625 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.784065 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.784925 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.785471 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.786065 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.786962 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.794228 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.812578 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:28 crc kubenswrapper[4910]: I0105 21:51:28.825190 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.842269 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.843983 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.844024 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.844035 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.844105 4910 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.851720 4910 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.851997 4910 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.853040 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.853089 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.853106 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.853156 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.853174 4910 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:29Z","lastTransitionTime":"2026-01-05T21:51:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.864834 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d"} Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.879656 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":
true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:29 crc kubenswrapper[4910]: E0105 21:51:29.885232 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.889767 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.889800 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.889812 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.889831 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.889845 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:29Z","lastTransitionTime":"2026-01-05T21:51:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.900091 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:29 crc kubenswrapper[4910]: E0105 21:51:29.904596 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.908816 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.908855 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.908869 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.908888 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.908900 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:29Z","lastTransitionTime":"2026-01-05T21:51:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.919072 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:29 crc kubenswrapper[4910]: E0105 21:51:29.929985 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.932388 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.933437 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.933544 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.933616 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.933687 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.933749 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:29Z","lastTransitionTime":"2026-01-05T21:51:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.945910 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:29 crc kubenswrapper[4910]: E0105 21:51:29.950822 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"1
3985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.954593 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.954717 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.954807 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.954894 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.954978 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:29Z","lastTransitionTime":"2026-01-05T21:51:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.959336 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:29 crc kubenswrapper[4910]: E0105 21:51:29.967838 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:29 crc kubenswrapper[4910]: E0105 21:51:29.968069 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.969707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.969757 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.969780 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.969840 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.969864 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:29Z","lastTransitionTime":"2026-01-05T21:51:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.971671 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.976229 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 
21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.982065 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.986441 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 05 21:51:29 crc kubenswrapper[4910]: I0105 21:51:29.996085 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.011031 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.026510 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.048936 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.064496 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.072703 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.072989 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.073193 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.073344 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.073476 4910 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:30Z","lastTransitionTime":"2026-01-05T21:51:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.080337 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.094297 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.111407 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.123162 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.140692 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.160461 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.176811 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.176850 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.176860 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.176878 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.176891 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:30Z","lastTransitionTime":"2026-01-05T21:51:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.181621 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.197504 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.212407 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.227057 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.278663 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.278702 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.278713 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.278729 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.278740 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:30Z","lastTransitionTime":"2026-01-05T21:51:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.380949 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.381006 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.381020 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.381044 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.381057 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:30Z","lastTransitionTime":"2026-01-05T21:51:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.483461 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.483513 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.483523 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.483542 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.483552 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:30Z","lastTransitionTime":"2026-01-05T21:51:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.495418 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.495560 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.495669 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.495729 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.495769 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:51:34.495720089 +0000 UTC m=+26.073217799 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.495849 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.495888 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.495914 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.495926 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.495942 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:34.495919904 +0000 UTC m=+26.073417574 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.495952 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.495982 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:34.495961165 +0000 UTC m=+26.073458835 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.495984 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.495859 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.496067 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:34.496050487 +0000 UTC m=+26.073548187 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.495994 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.496111 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.496158 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:34.49614796 +0000 UTC m=+26.073645840 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.585892 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.585944 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.585960 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.585983 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.585999 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:30Z","lastTransitionTime":"2026-01-05T21:51:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.688691 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.688760 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.688774 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.688797 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.688812 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:30Z","lastTransitionTime":"2026-01-05T21:51:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.721036 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.721066 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.721211 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.721309 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.721066 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:30 crc kubenswrapper[4910]: E0105 21:51:30.721746 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.791843 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.791913 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.791927 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.791947 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.791960 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:30Z","lastTransitionTime":"2026-01-05T21:51:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.852321 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.864798 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.868748 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.870277 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.879638 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.894505 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.894561 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.894575 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.894595 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.894605 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:30Z","lastTransitionTime":"2026-01-05T21:51:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.896612 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.910509 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.923593 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.937001 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.950062 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.964142 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.979690 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.993449 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:30Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.997212 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.997276 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.997292 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.997317 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:30 crc kubenswrapper[4910]: I0105 21:51:30.997330 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:30Z","lastTransitionTime":"2026-01-05T21:51:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.005627 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.025505 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.062662 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.099621 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.099668 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.099678 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.099695 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.099707 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:31Z","lastTransitionTime":"2026-01-05T21:51:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.107272 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.122534 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.134807 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.146757 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.202111 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.202181 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.202195 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.202218 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.202233 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:31Z","lastTransitionTime":"2026-01-05T21:51:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.304384 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.304428 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.304463 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.304485 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.304498 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:31Z","lastTransitionTime":"2026-01-05T21:51:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.407325 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.407374 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.407384 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.407402 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.407416 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:31Z","lastTransitionTime":"2026-01-05T21:51:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.427460 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-6566d"] Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.427814 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-6566d" Jan 05 21:51:31 crc kubenswrapper[4910]: W0105 21:51:31.431973 4910 reflector.go:561] object-"openshift-dns"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-dns": no relationship found between node 'crc' and this object Jan 05 21:51:31 crc kubenswrapper[4910]: E0105 21:51:31.432434 4910 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-dns\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 05 21:51:31 crc kubenswrapper[4910]: W0105 21:51:31.433384 4910 reflector.go:561] object-"openshift-dns"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-dns": no relationship found between node 'crc' and this object Jan 05 21:51:31 crc kubenswrapper[4910]: E0105 21:51:31.433406 4910 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-dns\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 05 21:51:31 crc kubenswrapper[4910]: W0105 21:51:31.435337 4910 reflector.go:561] object-"openshift-dns"/"node-resolver-dockercfg-kz9s7": failed to list *v1.Secret: secrets "node-resolver-dockercfg-kz9s7" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace 
"openshift-dns": no relationship found between node 'crc' and this object Jan 05 21:51:31 crc kubenswrapper[4910]: E0105 21:51:31.435393 4910 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns\"/\"node-resolver-dockercfg-kz9s7\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"node-resolver-dockercfg-kz9s7\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-dns\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.460352 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\
\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.502451 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the 
pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.502918 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9n8r\" (UniqueName: \"kubernetes.io/projected/49dcd7ad-de44-4aa1-ba88-b7377edbdf0b-kube-api-access-p9n8r\") pod \"node-resolver-6566d\" (UID: \"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\") " pod="openshift-dns/node-resolver-6566d" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.502955 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/49dcd7ad-de44-4aa1-ba88-b7377edbdf0b-hosts-file\") pod \"node-resolver-6566d\" (UID: \"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\") " pod="openshift-dns/node-resolver-6566d" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.509517 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.509547 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.509557 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.509571 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.509582 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:31Z","lastTransitionTime":"2026-01-05T21:51:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.529925 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.546592 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with 
unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.558675 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.577538 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"r
estartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\
\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.593045 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.603694 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9n8r\" (UniqueName: \"kubernetes.io/projected/49dcd7ad-de44-4aa1-ba88-b7377edbdf0b-kube-api-access-p9n8r\") pod \"node-resolver-6566d\" (UID: \"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\") " pod="openshift-dns/node-resolver-6566d" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.603740 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/49dcd7ad-de44-4aa1-ba88-b7377edbdf0b-hosts-file\") pod \"node-resolver-6566d\" (UID: \"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\") " 
pod="openshift-dns/node-resolver-6566d" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.603823 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/49dcd7ad-de44-4aa1-ba88-b7377edbdf0b-hosts-file\") pod \"node-resolver-6566d\" (UID: \"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\") " pod="openshift-dns/node-resolver-6566d" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.606022 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.611456 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.611899 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.611977 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.612058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.612146 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:31Z","lastTransitionTime":"2026-01-05T21:51:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.624970 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.639328 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.714700 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.715046 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.715182 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.715376 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.715479 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:31Z","lastTransitionTime":"2026-01-05T21:51:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.818180 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.818225 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.818234 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.818252 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.818273 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:31Z","lastTransitionTime":"2026-01-05T21:51:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.824806 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-9zscm"] Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.825480 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.829564 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.829613 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.829961 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.833550 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.833909 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.847795 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"res
ource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.861613 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.880001 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.891506 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.905309 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.906594 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-multus-cni-dir\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.906639 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-multus-socket-dir-parent\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.906668 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-var-lib-cni-multus\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.906814 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-hostroot\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.906870 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-run-netns\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.906896 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-var-lib-kubelet\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.906938 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-os-release\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.906960 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/07ebbe82-9e6e-47a5-91a7-4b515efc78db-multus-daemon-config\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.906998 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-multus-conf-dir\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.907018 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-run-multus-certs\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.907042 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-cnibin\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.907062 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/07ebbe82-9e6e-47a5-91a7-4b515efc78db-cni-binary-copy\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.907078 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-var-lib-cni-bin\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.907094 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kh5qp\" (UniqueName: \"kubernetes.io/projected/07ebbe82-9e6e-47a5-91a7-4b515efc78db-kube-api-access-kh5qp\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.907121 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-run-k8s-cni-cncf-io\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.907153 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-etc-kubernetes\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.907175 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-system-cni-dir\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.920341 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.920374 4910 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.920384 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.920401 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.920412 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:31Z","lastTransitionTime":"2026-01-05T21:51:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.923620 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.969080 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"r
estartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\
\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.986942 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:31 crc kubenswrapper[4910]: I0105 21:51:31.999885 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:31Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.007985 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-multus-conf-dir\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008018 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-run-multus-certs\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008037 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-cnibin\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008056 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/07ebbe82-9e6e-47a5-91a7-4b515efc78db-cni-binary-copy\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008077 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-var-lib-cni-bin\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 
21:51:32.008094 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kh5qp\" (UniqueName: \"kubernetes.io/projected/07ebbe82-9e6e-47a5-91a7-4b515efc78db-kube-api-access-kh5qp\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008113 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-run-k8s-cni-cncf-io\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008146 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-etc-kubernetes\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008163 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-system-cni-dir\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008160 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-run-multus-certs\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008185 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-multus-cni-dir\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008206 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-var-lib-cni-bin\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008237 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-multus-socket-dir-parent\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008251 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-multus-cni-dir\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008236 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-multus-conf-dir\") pod 
\"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008273 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-etc-kubernetes\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008294 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-system-cni-dir\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008295 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-var-lib-cni-multus\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008265 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-var-lib-cni-multus\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008315 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-run-k8s-cni-cncf-io\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008378 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-hostroot\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008400 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-run-netns\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008349 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-multus-socket-dir-parent\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008426 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-hostroot\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008342 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: 
\"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-cnibin\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008442 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-var-lib-kubelet\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008419 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-var-lib-kubelet\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008455 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-host-run-netns\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008556 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-os-release\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008592 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/07ebbe82-9e6e-47a5-91a7-4b515efc78db-multus-daemon-config\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008638 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/07ebbe82-9e6e-47a5-91a7-4b515efc78db-os-release\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.008804 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/07ebbe82-9e6e-47a5-91a7-4b515efc78db-cni-binary-copy\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.009216 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/07ebbe82-9e6e-47a5-91a7-4b515efc78db-multus-daemon-config\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.023332 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.023373 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.023383 4910 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.023399 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.023410 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:32Z","lastTransitionTime":"2026-01-05T21:51:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.024336 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.035587 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kh5qp\" (UniqueName: \"kubernetes.io/projected/07ebbe82-9e6e-47a5-91a7-4b515efc78db-kube-api-access-kh5qp\") pod \"multus-9zscm\" (UID: \"07ebbe82-9e6e-47a5-91a7-4b515efc78db\") " pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.061534 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.126323 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.126368 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:32 crc kubenswrapper[4910]: 
I0105 21:51:32.126378 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.126395 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.126407 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:32Z","lastTransitionTime":"2026-01-05T21:51:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.136871 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-9zscm" Jan 05 21:51:32 crc kubenswrapper[4910]: W0105 21:51:32.157567 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod07ebbe82_9e6e_47a5_91a7_4b515efc78db.slice/crio-5eccff8e97b0587478e91054ab069ef0d8fd4129fe6730c1ecb22a2d37e30ede WatchSource:0}: Error finding container 5eccff8e97b0587478e91054ab069ef0d8fd4129fe6730c1ecb22a2d37e30ede: Status 404 returned error can't find the container with id 5eccff8e97b0587478e91054ab069ef0d8fd4129fe6730c1ecb22a2d37e30ede Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.228166 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-p4t85"] Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.228523 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-hflcr"] Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.228684 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.228919 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.228959 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.228970 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.228991 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.229002 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:32Z","lastTransitionTime":"2026-01-05T21:51:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.229503 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.230634 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.230717 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.231104 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.231747 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.233606 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.234903 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.234973 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.244140 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.256858 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.273074 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.287219 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.302433 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.311793 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-system-cni-dir\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.311858 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.311890 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1180e67b-86e7-4aa8-b84f-55e2a18a7918-proxy-tls\") pod \"machine-config-daemon-p4t85\" (UID: \"1180e67b-86e7-4aa8-b84f-55e2a18a7918\") " pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.311972 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1180e67b-86e7-4aa8-b84f-55e2a18a7918-mcd-auth-proxy-config\") pod \"machine-config-daemon-p4t85\" (UID: \"1180e67b-86e7-4aa8-b84f-55e2a18a7918\") " pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.311996 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-cnibin\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.312027 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.312062 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-cni-binary-copy\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.312209 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2k6mj\" (UniqueName: \"kubernetes.io/projected/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-kube-api-access-2k6mj\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.312263 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1180e67b-86e7-4aa8-b84f-55e2a18a7918-rootfs\") pod \"machine-config-daemon-p4t85\" (UID: \"1180e67b-86e7-4aa8-b84f-55e2a18a7918\") " pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.312292 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-os-release\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.312322 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqx46\" (UniqueName: \"kubernetes.io/projected/1180e67b-86e7-4aa8-b84f-55e2a18a7918-kube-api-access-gqx46\") pod \"machine-config-daemon-p4t85\" (UID: \"1180e67b-86e7-4aa8-b84f-55e2a18a7918\") " pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.322793 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.332167 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.332218 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.332228 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.332248 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.332260 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:32Z","lastTransitionTime":"2026-01-05T21:51:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.339817 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.356546 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.373861 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.396451 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"fi
nishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.409969 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413111 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-os-release\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413265 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqx46\" (UniqueName: \"kubernetes.io/projected/1180e67b-86e7-4aa8-b84f-55e2a18a7918-kube-api-access-gqx46\") pod \"machine-config-daemon-p4t85\" (UID: \"1180e67b-86e7-4aa8-b84f-55e2a18a7918\") " pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413275 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-os-release\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413311 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-system-cni-dir\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413366 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-tuning-conf-dir\") 
pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413401 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1180e67b-86e7-4aa8-b84f-55e2a18a7918-proxy-tls\") pod \"machine-config-daemon-p4t85\" (UID: \"1180e67b-86e7-4aa8-b84f-55e2a18a7918\") " pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413480 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-system-cni-dir\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413504 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1180e67b-86e7-4aa8-b84f-55e2a18a7918-mcd-auth-proxy-config\") pod \"machine-config-daemon-p4t85\" (UID: \"1180e67b-86e7-4aa8-b84f-55e2a18a7918\") " pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413558 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-cnibin\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413581 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413622 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-cnibin\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413597 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413681 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-cni-binary-copy\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413709 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-2k6mj\" (UniqueName: \"kubernetes.io/projected/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-kube-api-access-2k6mj\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413735 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1180e67b-86e7-4aa8-b84f-55e2a18a7918-rootfs\") pod \"machine-config-daemon-p4t85\" (UID: \"1180e67b-86e7-4aa8-b84f-55e2a18a7918\") " pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.413799 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1180e67b-86e7-4aa8-b84f-55e2a18a7918-rootfs\") pod \"machine-config-daemon-p4t85\" (UID: \"1180e67b-86e7-4aa8-b84f-55e2a18a7918\") " pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.414949 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-cni-binary-copy\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.415074 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1180e67b-86e7-4aa8-b84f-55e2a18a7918-mcd-auth-proxy-config\") pod \"machine-config-daemon-p4t85\" (UID: \"1180e67b-86e7-4aa8-b84f-55e2a18a7918\") " pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.415264 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.418144 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1180e67b-86e7-4aa8-b84f-55e2a18a7918-proxy-tls\") pod \"machine-config-daemon-p4t85\" (UID: \"1180e67b-86e7-4aa8-b84f-55e2a18a7918\") " pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.427867 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.435369 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.435419 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.435433 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.435453 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.435465 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:32Z","lastTransitionTime":"2026-01-05T21:51:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.437806 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2k6mj\" (UniqueName: \"kubernetes.io/projected/474e7e8c-c9f5-4f54-81c9-0976bcc6565d-kube-api-access-2k6mj\") pod \"multus-additional-cni-plugins-hflcr\" (UID: \"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\") " pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.443355 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqx46\" (UniqueName: \"kubernetes.io/projected/1180e67b-86e7-4aa8-b84f-55e2a18a7918-kube-api-access-gqx46\") pod \"machine-config-daemon-p4t85\" (UID: \"1180e67b-86e7-4aa8-b84f-55e2a18a7918\") " pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.448427 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.466209 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.478600 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.492616 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.510703 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.524838 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.538504 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.538557 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.538572 4910 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.538591 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.538601 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:32Z","lastTransitionTime":"2026-01-05T21:51:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.540800 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.546147 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-hflcr" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.546030 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: W0105 21:51:32.553769 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1180e67b_86e7_4aa8_b84f_55e2a18a7918.slice/crio-49a50d73600ffb32941bfd932bd0c8b4be630891f78b8202bd4b57dae5618f94 WatchSource:0}: Error finding container 49a50d73600ffb32941bfd932bd0c8b4be630891f78b8202bd4b57dae5618f94: Status 404 returned error can't find the container with id 49a50d73600ffb32941bfd932bd0c8b4be630891f78b8202bd4b57dae5618f94 Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.560246 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: W0105 21:51:32.561925 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod474e7e8c_c9f5_4f54_81c9_0976bcc6565d.slice/crio-18647225c6dca160969195dd04d29e117cb7290ee9434565b34bd1145a5ff1a6 WatchSource:0}: Error finding container 18647225c6dca160969195dd04d29e117cb7290ee9434565b34bd1145a5ff1a6: Status 404 returned error can't find the container with id 18647225c6dca160969195dd04d29e117cb7290ee9434565b34bd1145a5ff1a6 Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.575353 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.588215 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.599142 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fpk76"] Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.600222 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.602466 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.604477 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.604624 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.604669 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.604783 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.604813 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.604484 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.604801 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.614663 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-systemd\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.614699 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-log-socket\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.614722 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-run-ovn-kubernetes\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.614744 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.614763 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-systemd-units\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.614780 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-ovn\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.614798 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovn-node-metrics-cert\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.615025 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-openvswitch\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.615083 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-run-netns\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.615105 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovnkube-config\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: E0105 21:51:32.615164 4910 projected.go:288] Couldn't get configMap openshift-dns/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.615169 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-var-lib-openvswitch\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.615258 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-cni-bin\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.615287 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-cni-netd\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.615345 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-kubelet\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.615389 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovnkube-script-lib\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.615439 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: 
\"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-node-log\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.615467 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-slash\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.615485 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-env-overrides\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.615537 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-etc-openvswitch\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.615581 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjpvg\" (UniqueName: \"kubernetes.io/projected/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-kube-api-access-gjpvg\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.624562 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.638727 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.640817 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.640853 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.640863 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.640882 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.640905 4910 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:32Z","lastTransitionTime":"2026-01-05T21:51:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.651454 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.666229 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.684173 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.684984 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: E0105 21:51:32.686031 4910 projected.go:194] Error preparing data for projected volume kube-api-access-p9n8r for pod openshift-dns/node-resolver-6566d: failed to sync configmap cache: timed out waiting for the condition Jan 05 21:51:32 crc kubenswrapper[4910]: E0105 21:51:32.686144 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/49dcd7ad-de44-4aa1-ba88-b7377edbdf0b-kube-api-access-p9n8r podName:49dcd7ad-de44-4aa1-ba88-b7377edbdf0b nodeName:}" failed. No retries permitted until 2026-01-05 21:51:33.186105313 +0000 UTC m=+24.763602983 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-p9n8r" (UniqueName: "kubernetes.io/projected/49dcd7ad-de44-4aa1-ba88-b7377edbdf0b-kube-api-access-p9n8r") pod "node-resolver-6566d" (UID: "49dcd7ad-de44-4aa1-ba88-b7377edbdf0b") : failed to sync configmap cache: timed out waiting for the condition Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.696883 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.709506 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716257 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-cni-bin\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716304 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-cni-netd\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716323 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-var-lib-openvswitch\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716369 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-kubelet\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716390 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovnkube-script-lib\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716385 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-cni-bin\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716602 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-node-log\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716627 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-slash\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716651 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-env-overrides\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716689 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-etc-openvswitch\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716676 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-cni-netd\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716720 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjpvg\" (UniqueName: \"kubernetes.io/projected/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-kube-api-access-gjpvg\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716726 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-var-lib-openvswitch\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716776 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-slash\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716799 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-node-log\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716748 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-systemd\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716852 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-etc-openvswitch\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716879 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-log-socket\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716915 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-run-ovn-kubernetes\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716944 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716976 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-systemd-units\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717001 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-ovn\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717022 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovn-node-metrics-cert\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717055 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-openvswitch\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717089 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-run-netns\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717111 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovnkube-config\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717306 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-systemd-units\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717361 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-log-socket\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717405 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovnkube-script-lib\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717398 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-run-ovn-kubernetes\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717472 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717515 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-openvswitch\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717551 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-ovn\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.716702 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-kubelet\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717602 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-run-netns\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717672 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-env-overrides\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717874 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovnkube-config\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.717898 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-systemd\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.720486 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.720564 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:32 crc kubenswrapper[4910]: E0105 21:51:32.720663 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.720797 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.720975 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovn-node-metrics-cert\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: E0105 21:51:32.721171 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:32 crc kubenswrapper[4910]: E0105 21:51:32.721308 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.724827 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/st
atic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.733501 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.739961 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjpvg\" (UniqueName: \"kubernetes.io/projected/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-kube-api-access-gjpvg\") pod \"ovnkube-node-fpk76\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.740838 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.743521 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.743549 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.743558 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.743573 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.743583 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:32Z","lastTransitionTime":"2026-01-05T21:51:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.756326 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.777458 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.798534 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.817796 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.846923 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.846969 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.846980 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.846999 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.847018 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:32Z","lastTransitionTime":"2026-01-05T21:51:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.849466 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.868116 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.874774 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-9zscm" event={"ID":"07ebbe82-9e6e-47a5-91a7-4b515efc78db","Type":"ContainerStarted","Data":"3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.874861 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-9zscm" 
event={"ID":"07ebbe82-9e6e-47a5-91a7-4b515efc78db","Type":"ContainerStarted","Data":"5eccff8e97b0587478e91054ab069ef0d8fd4129fe6730c1ecb22a2d37e30ede"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.876186 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" event={"ID":"474e7e8c-c9f5-4f54-81c9-0976bcc6565d","Type":"ContainerStarted","Data":"e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.876245 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" event={"ID":"474e7e8c-c9f5-4f54-81c9-0976bcc6565d","Type":"ContainerStarted","Data":"18647225c6dca160969195dd04d29e117cb7290ee9434565b34bd1145a5ff1a6"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.878232 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.878270 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.878304 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"49a50d73600ffb32941bfd932bd0c8b4be630891f78b8202bd4b57dae5618f94"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.886972 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.894540 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.930929 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.932707 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Run
ning\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.949126 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.949182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.949193 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.949214 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.949227 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:32Z","lastTransitionTime":"2026-01-05T21:51:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.953692 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.968033 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.982103 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: I0105 21:51:32.995052 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:32Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:32 crc kubenswrapper[4910]: W0105 21:51:32.997069 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85c76d1_cfbe_4c6b_86c1_6d51bd45b42b.slice/crio-3d208d1bec35c3b6cf5bca037630b84c591b8a23c3201b51070386218411a43f WatchSource:0}: Error finding container 3d208d1bec35c3b6cf5bca037630b84c591b8a23c3201b51070386218411a43f: Status 404 returned error can't find the container with id 
3d208d1bec35c3b6cf5bca037630b84c591b8a23c3201b51070386218411a43f Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.012080 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"reso
urce-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.041931 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.052783 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.053265 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.053279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.053304 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.053513 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:33Z","lastTransitionTime":"2026-01-05T21:51:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.057745 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.071373 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with 
unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.092096 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256
:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.106488 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.116619 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.130605 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.146504 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.156360 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.156416 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.156430 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.156452 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.156466 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:33Z","lastTransitionTime":"2026-01-05T21:51:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.222007 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9n8r\" (UniqueName: \"kubernetes.io/projected/49dcd7ad-de44-4aa1-ba88-b7377edbdf0b-kube-api-access-p9n8r\") pod \"node-resolver-6566d\" (UID: \"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\") " pod="openshift-dns/node-resolver-6566d" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.226788 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9n8r\" (UniqueName: \"kubernetes.io/projected/49dcd7ad-de44-4aa1-ba88-b7377edbdf0b-kube-api-access-p9n8r\") pod \"node-resolver-6566d\" (UID: \"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\") " pod="openshift-dns/node-resolver-6566d" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.239527 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-6566d" Jan 05 21:51:33 crc kubenswrapper[4910]: W0105 21:51:33.253259 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod49dcd7ad_de44_4aa1_ba88_b7377edbdf0b.slice/crio-5add355456aa93bd2c9a413592bfdeda3c23222985a8ab9a556a9185f78610f6 WatchSource:0}: Error finding container 5add355456aa93bd2c9a413592bfdeda3c23222985a8ab9a556a9185f78610f6: Status 404 returned error can't find the container with id 5add355456aa93bd2c9a413592bfdeda3c23222985a8ab9a556a9185f78610f6 Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.262205 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.262243 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.262255 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.262272 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.262284 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:33Z","lastTransitionTime":"2026-01-05T21:51:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.365330 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.365395 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.365409 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.365428 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.365443 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:33Z","lastTransitionTime":"2026-01-05T21:51:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.468717 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.469155 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.469167 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.469182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.469192 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:33Z","lastTransitionTime":"2026-01-05T21:51:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.572386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.572421 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.572431 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.572452 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.572465 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:33Z","lastTransitionTime":"2026-01-05T21:51:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.676912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.676967 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.676979 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.677000 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.677013 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:33Z","lastTransitionTime":"2026-01-05T21:51:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.780218 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.780251 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.780258 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.780275 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.780284 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:33Z","lastTransitionTime":"2026-01-05T21:51:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.881831 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.881877 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.881887 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.881907 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.881918 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:33Z","lastTransitionTime":"2026-01-05T21:51:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.883495 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerID="4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95" exitCode=0 Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.883563 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.883618 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerStarted","Data":"3d208d1bec35c3b6cf5bca037630b84c591b8a23c3201b51070386218411a43f"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.886577 4910 generic.go:334] "Generic (PLEG): container finished" podID="474e7e8c-c9f5-4f54-81c9-0976bcc6565d" containerID="e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65" exitCode=0 Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.886654 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" event={"ID":"474e7e8c-c9f5-4f54-81c9-0976bcc6565d","Type":"ContainerDied","Data":"e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.888636 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-6566d" event={"ID":"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b","Type":"ContainerStarted","Data":"a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.888661 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-6566d" event={"ID":"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b","Type":"ContainerStarted","Data":"5add355456aa93bd2c9a413592bfdeda3c23222985a8ab9a556a9185f78610f6"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.917590 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node 
kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.940551 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},
\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75
bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.955181 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.971521 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.987017 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.987055 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.987065 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.987083 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.987094 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:33Z","lastTransitionTime":"2026-01-05T21:51:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:33 crc kubenswrapper[4910]: I0105 21:51:33.990389 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/opensh
ift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":tru
e,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:33Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.004685 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent
\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.019167 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.036321 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.065057 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.077972 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.089678 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.091330 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.091402 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.091412 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.091432 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.091446 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:34Z","lastTransitionTime":"2026-01-05T21:51:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.102847 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\"
,\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.108657 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-458lg"] Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.109115 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-458lg" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.110974 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.111429 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.111545 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.111606 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.116149 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.127036 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.133088 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e8004e61-6340-451e-899d-da531d593315-host\") pod \"node-ca-458lg\" (UID: 
\"e8004e61-6340-451e-899d-da531d593315\") " pod="openshift-image-registry/node-ca-458lg" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.133178 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/e8004e61-6340-451e-899d-da531d593315-serviceca\") pod \"node-ca-458lg\" (UID: \"e8004e61-6340-451e-899d-da531d593315\") " pod="openshift-image-registry/node-ca-458lg" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.133206 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ss8ht\" (UniqueName: \"kubernetes.io/projected/e8004e61-6340-451e-899d-da531d593315-kube-api-access-ss8ht\") pod \"node-ca-458lg\" (UID: \"e8004e61-6340-451e-899d-da531d593315\") " pod="openshift-image-registry/node-ca-458lg" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.140559 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-va
r-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.162543 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0
b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.179610 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.194203 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.194238 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.194248 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.194265 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.194276 4910 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:34Z","lastTransitionTime":"2026-01-05T21:51:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.195961 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.210524 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.226481 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.234749 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e8004e61-6340-451e-899d-da531d593315-host\") pod \"node-ca-458lg\" (UID: \"e8004e61-6340-451e-899d-da531d593315\") " pod="openshift-image-registry/node-ca-458lg" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.234826 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/e8004e61-6340-451e-899d-da531d593315-serviceca\") pod \"node-ca-458lg\" (UID: \"e8004e61-6340-451e-899d-da531d593315\") " pod="openshift-image-registry/node-ca-458lg" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.234862 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ss8ht\" (UniqueName: \"kubernetes.io/projected/e8004e61-6340-451e-899d-da531d593315-kube-api-access-ss8ht\") pod \"node-ca-458lg\" (UID: \"e8004e61-6340-451e-899d-da531d593315\") " pod="openshift-image-registry/node-ca-458lg" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.234891 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e8004e61-6340-451e-899d-da531d593315-host\") pod \"node-ca-458lg\" (UID: \"e8004e61-6340-451e-899d-da531d593315\") " pod="openshift-image-registry/node-ca-458lg" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.235845 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/e8004e61-6340-451e-899d-da531d593315-serviceca\") pod \"node-ca-458lg\" (UID: \"e8004e61-6340-451e-899d-da531d593315\") " pod="openshift-image-registry/node-ca-458lg" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.243730 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.255805 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ss8ht\" (UniqueName: \"kubernetes.io/projected/e8004e61-6340-451e-899d-da531d593315-kube-api-access-ss8ht\") pod \"node-ca-458lg\" (UID: \"e8004e61-6340-451e-899d-da531d593315\") " pod="openshift-image-registry/node-ca-458lg" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.258866 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.273469 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.288699 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.295963 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.295989 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.296001 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.296017 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.296027 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:34Z","lastTransitionTime":"2026-01-05T21:51:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.313495 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.329494 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.344148 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.369751 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc 
kubenswrapper[4910]: I0105 21:51:34.393296 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"Po
dInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.400667 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.400734 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.400754 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.400779 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.400797 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:34Z","lastTransitionTime":"2026-01-05T21:51:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.450344 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-458lg" Jan 05 21:51:34 crc kubenswrapper[4910]: W0105 21:51:34.467781 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8004e61_6340_451e_899d_da531d593315.slice/crio-ceccad2e028e56bf98500bcefe86cca2acd4c1cbd2f1667f81010552fcb1ea58 WatchSource:0}: Error finding container ceccad2e028e56bf98500bcefe86cca2acd4c1cbd2f1667f81010552fcb1ea58: Status 404 returned error can't find the container with id ceccad2e028e56bf98500bcefe86cca2acd4c1cbd2f1667f81010552fcb1ea58 Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.504635 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.504684 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.504694 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.504714 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.504725 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:34Z","lastTransitionTime":"2026-01-05T21:51:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.538090 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.538279 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.538337 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.538374 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.538410 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.538586 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.538671 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:42.538649103 +0000 UTC m=+34.116146813 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.539281 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:51:42.539263498 +0000 UTC m=+34.116761198 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.539395 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.539423 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.539444 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.539492 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:42.539478774 +0000 UTC m=+34.116976474 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.539549 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.539595 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:42.539583356 +0000 UTC m=+34.117081056 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.539666 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.539691 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.539707 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.539748 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:42.53973606 +0000 UTC m=+34.117233760 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.608237 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.608286 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.608298 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.608320 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.608334 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:34Z","lastTransitionTime":"2026-01-05T21:51:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.720829 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.720951 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.721009 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.721186 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.721410 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.721465 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.721477 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.721495 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.721510 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:34Z","lastTransitionTime":"2026-01-05T21:51:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.721590 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:34 crc kubenswrapper[4910]: E0105 21:51:34.721674 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.824751 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.824792 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.824800 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.824817 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.824828 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:34Z","lastTransitionTime":"2026-01-05T21:51:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.895271 4910 generic.go:334] "Generic (PLEG): container finished" podID="474e7e8c-c9f5-4f54-81c9-0976bcc6565d" containerID="92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913" exitCode=0 Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.895366 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" event={"ID":"474e7e8c-c9f5-4f54-81c9-0976bcc6565d","Type":"ContainerDied","Data":"92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.896968 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-458lg" event={"ID":"e8004e61-6340-451e-899d-da531d593315","Type":"ContainerStarted","Data":"1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.897013 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-458lg" event={"ID":"e8004e61-6340-451e-899d-da531d593315","Type":"ContainerStarted","Data":"ceccad2e028e56bf98500bcefe86cca2acd4c1cbd2f1667f81010552fcb1ea58"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.900517 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerStarted","Data":"5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.900576 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerStarted","Data":"15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.900594 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerStarted","Data":"3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.900606 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerStarted","Data":"f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.900614 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerStarted","Data":"94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.900623 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerStarted","Data":"4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.910845 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.924307 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.928737 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.928790 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.928803 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.928823 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.928842 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:34Z","lastTransitionTime":"2026-01-05T21:51:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.940918 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.958158 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.972164 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:34 crc kubenswrapper[4910]: I0105 21:51:34.985703 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.001119 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:34Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.016048 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.028064 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.032701 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.032754 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.032769 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.032790 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.032803 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:35Z","lastTransitionTime":"2026-01-05T21:51:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.044246 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.073405 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.088434 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.104841 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.122082 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.136048 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.136098 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.136109 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.136139 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.136153 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:35Z","lastTransitionTime":"2026-01-05T21:51:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.145013 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z 
is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.163052 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.179391 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.200395 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.218849 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.233277 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.238672 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.238794 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.238880 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.238968 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.239066 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:35Z","lastTransitionTime":"2026-01-05T21:51:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.250720 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursive
ReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.263235 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.305465 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e
8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.344909 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.344954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.344964 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.345015 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.345026 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:35Z","lastTransitionTime":"2026-01-05T21:51:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.345633 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.386020 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.428952 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z 
is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.447385 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.447559 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.447635 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.447726 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.447799 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:35Z","lastTransitionTime":"2026-01-05T21:51:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.468874 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731c
a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.503675 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.545514 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.550294 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.550335 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.550347 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.550364 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.550373 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:35Z","lastTransitionTime":"2026-01-05T21:51:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.584365 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.653099 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.653164 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.653176 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.653196 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.653206 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:35Z","lastTransitionTime":"2026-01-05T21:51:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.755612 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.755653 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.755666 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.755684 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.755695 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:35Z","lastTransitionTime":"2026-01-05T21:51:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.857936 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.857984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.857992 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.858011 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.858021 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:35Z","lastTransitionTime":"2026-01-05T21:51:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.909755 4910 generic.go:334] "Generic (PLEG): container finished" podID="474e7e8c-c9f5-4f54-81c9-0976bcc6565d" containerID="a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3" exitCode=0 Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.909886 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" event={"ID":"474e7e8c-c9f5-4f54-81c9-0976bcc6565d","Type":"ContainerDied","Data":"a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3"} Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.932654 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gq
x46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.950003 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name
\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.961381 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.961426 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.961442 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.961468 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.961482 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:35Z","lastTransitionTime":"2026-01-05T21:51:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.970892 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:35 crc kubenswrapper[4910]: I0105 21:51:35.988284 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:35Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.008364 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.030109 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.047363 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.062157 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.065296 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.065378 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.065607 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.065635 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.065888 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:36Z","lastTransitionTime":"2026-01-05T21:51:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.075887 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc 
kubenswrapper[4910]: I0105 21:51:36.087833 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.109020 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.122541 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.142340 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.169483 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.169521 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.169535 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.169555 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.169570 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:36Z","lastTransitionTime":"2026-01-05T21:51:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.171996 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d
75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.193068 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\
"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852a
b8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.272868 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.272905 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" 
Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.272914 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.272930 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.272939 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:36Z","lastTransitionTime":"2026-01-05T21:51:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.376817 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.376887 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.376909 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.376939 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.376959 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:36Z","lastTransitionTime":"2026-01-05T21:51:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.480278 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.480338 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.480354 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.480382 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.480399 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:36Z","lastTransitionTime":"2026-01-05T21:51:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.583742 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.583806 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.583825 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.583852 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.583873 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:36Z","lastTransitionTime":"2026-01-05T21:51:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.687954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.688004 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.688016 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.688035 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.688049 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:36Z","lastTransitionTime":"2026-01-05T21:51:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.720786 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.720852 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.720786 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:36 crc kubenswrapper[4910]: E0105 21:51:36.720933 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:36 crc kubenswrapper[4910]: E0105 21:51:36.721086 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:36 crc kubenswrapper[4910]: E0105 21:51:36.722253 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.794322 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.794414 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.794440 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.794478 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.794498 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:36Z","lastTransitionTime":"2026-01-05T21:51:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.898529 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.898614 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.898634 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.898667 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.898687 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:36Z","lastTransitionTime":"2026-01-05T21:51:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.917443 4910 generic.go:334] "Generic (PLEG): container finished" podID="474e7e8c-c9f5-4f54-81c9-0976bcc6565d" containerID="8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6" exitCode=0 Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.917568 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" event={"ID":"474e7e8c-c9f5-4f54-81c9-0976bcc6565d","Type":"ContainerDied","Data":"8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6"} Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.924360 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerStarted","Data":"7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8"} Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.959567 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.980651 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:36 crc kubenswrapper[4910]: I0105 21:51:36.994780 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:36Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.002468 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.002530 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.002557 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.002594 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.002625 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:37Z","lastTransitionTime":"2026-01-05T21:51:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.011743 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:
51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.039442 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.056151 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k
8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.068989 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.089991 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.121379 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.121424 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.121434 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.121451 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.121461 4910 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:37Z","lastTransitionTime":"2026-01-05T21:51:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.131384 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.168795 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.188044 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.201764 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.214822 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.224833 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.224863 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.224872 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.224891 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.224901 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:37Z","lastTransitionTime":"2026-01-05T21:51:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.228332 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.241362 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.327305 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.327337 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.327346 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.327361 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.327370 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:37Z","lastTransitionTime":"2026-01-05T21:51:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.430301 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.430337 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.430346 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.430362 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.430371 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:37Z","lastTransitionTime":"2026-01-05T21:51:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.533296 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.533346 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.533357 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.533375 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.533386 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:37Z","lastTransitionTime":"2026-01-05T21:51:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.636814 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.636854 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.636866 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.636885 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.636898 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:37Z","lastTransitionTime":"2026-01-05T21:51:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.742374 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.742435 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.742455 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.742482 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.742499 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:37Z","lastTransitionTime":"2026-01-05T21:51:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.844947 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.845362 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.845373 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.845429 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.845441 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:37Z","lastTransitionTime":"2026-01-05T21:51:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.932003 4910 generic.go:334] "Generic (PLEG): container finished" podID="474e7e8c-c9f5-4f54-81c9-0976bcc6565d" containerID="d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef" exitCode=0 Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.932088 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" event={"ID":"474e7e8c-c9f5-4f54-81c9-0976bcc6565d","Type":"ContainerDied","Data":"d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef"} Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.948872 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.948933 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.948949 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.948971 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.948983 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:37Z","lastTransitionTime":"2026-01-05T21:51:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.954176 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.971161 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:37 crc kubenswrapper[4910]: I0105 21:51:37.990543 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:37Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.012460 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.031729 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.051445 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.053806 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.053870 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.057952 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.058306 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.058341 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:38Z","lastTransitionTime":"2026-01-05T21:51:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.068399 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.087114 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.107405 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.123866 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.142677 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.161293 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.161343 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.161359 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.161382 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.161397 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:38Z","lastTransitionTime":"2026-01-05T21:51:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.175179 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d
75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.201296 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\
"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852a
b8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.223830 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.245322 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.264967 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.265022 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.265041 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.265073 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.265094 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:38Z","lastTransitionTime":"2026-01-05T21:51:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.368610 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.368664 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.368680 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.368704 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.368725 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:38Z","lastTransitionTime":"2026-01-05T21:51:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.472819 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.472893 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.472915 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.472945 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.472966 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:38Z","lastTransitionTime":"2026-01-05T21:51:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.575958 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.576343 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.576686 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.577005 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.577318 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:38Z","lastTransitionTime":"2026-01-05T21:51:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.681800 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.681831 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.681841 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.681857 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.681867 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:38Z","lastTransitionTime":"2026-01-05T21:51:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.721018 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.721054 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.721306 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:38 crc kubenswrapper[4910]: E0105 21:51:38.721295 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:38 crc kubenswrapper[4910]: E0105 21:51:38.721580 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:38 crc kubenswrapper[4910]: E0105 21:51:38.721479 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.744022 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.764020 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.784777 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.785635 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.785874 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.785996 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.786193 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.786421 4910 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:38Z","lastTransitionTime":"2026-01-05T21:51:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.802820 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.819491 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.835100 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.866593 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.881911 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.889653 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.889698 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.889707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.889726 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.889738 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:38Z","lastTransitionTime":"2026-01-05T21:51:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.898901 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.915289 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\
\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.939264 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.943263 4910 generic.go:334] "Generic (PLEG): container finished" podID="474e7e8c-c9f5-4f54-81c9-0976bcc6565d" containerID="3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555" exitCode=0 Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.943320 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" event={"ID":"474e7e8c-c9f5-4f54-81c9-0976bcc6565d","Type":"ContainerDied","Data":"3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555"} Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.976279 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-
01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8
aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.992577 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.994090 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.994150 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.994159 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.994181 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:38 crc kubenswrapper[4910]: I0105 21:51:38.994191 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:38Z","lastTransitionTime":"2026-01-05T21:51:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.007929 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.024182 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.041947 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.060555 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.082176 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics 
northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"
host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.099477 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.099548 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.099562 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.099612 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.099625 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:39Z","lastTransitionTime":"2026-01-05T21:51:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.110310 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp
-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272
e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.131247 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.154832 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.168356 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.186929 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.201442 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.202428 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.202470 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.202486 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.202511 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.202527 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:39Z","lastTransitionTime":"2026-01-05T21:51:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.216545 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.231358 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.245992 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.256614 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.268066 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.281722 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.305191 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.305235 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.305244 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.305263 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.305274 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:39Z","lastTransitionTime":"2026-01-05T21:51:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
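
The repeated "failed calling webhook" entries above all reduce to one root cause: the serving certificate of the pod.network-node-identity.openshift.io webhook expired on 2025-08-24T17:21:41Z, while the node clock reads 2026-01-05. The comparison the TLS stack is reporting is a plain validity-window check against the certificate's NotBefore/NotAfter fields. A minimal Go sketch of that check follows; the certificate path is a placeholder, not taken from this log.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	// Placeholder path; point this at the webhook's serving certificate.
	data, err := os.ReadFile("/tmp/webhook-serving.crt")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// This is the comparison behind "certificate has expired or is not yet valid".
	now := time.Now()
	switch {
	case now.Before(cert.NotBefore):
		fmt.Printf("certificate not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
	case now.After(cert.NotAfter):
		fmt.Printf("certificate expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	default:
		fmt.Println("certificate is within its validity window")
	}
}
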
Has your network provider started?"} Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.408017 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.408073 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.408087 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.408108 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.408144 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:39Z","lastTransitionTime":"2026-01-05T21:51:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.512871 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.512935 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.512955 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.512989 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.513007 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:39Z","lastTransitionTime":"2026-01-05T21:51:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.615279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.615322 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.615334 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.615352 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.615366 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:39Z","lastTransitionTime":"2026-01-05T21:51:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
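
The "Node became not ready" entries recur because kubelet's runtime-network readiness check finds no CNI configuration. Conceptually, the check amounts to looking for a network config file in the directory named in the message; the rough standalone approximation below scans that directory (this is not kubelet's actual code path, just the same idea).

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	// Directory taken from the log message above.
	dir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		log.Fatalf("cannot read %s: %v", dir, err)
	}
	found := 0
	for _, e := range entries {
		// CNI config files conventionally end in .conf, .conflist, or .json.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("CNI config candidate:", filepath.Join(dir, e.Name()))
			found++
		}
	}
	if found == 0 {
		fmt.Println("no CNI configuration file found; network plugin would report not ready")
	}
}

In this log the directory presumably stays empty until the OVN-Kubernetes node pod seen later in the log writes its CNI config, after which the Ready condition can clear.
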
Has your network provider started?"} Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.717728 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.717812 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.717831 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.717858 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.717878 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:39Z","lastTransitionTime":"2026-01-05T21:51:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.821573 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.821646 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.821665 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.821693 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.821716 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:39Z","lastTransitionTime":"2026-01-05T21:51:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.924916 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.924986 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.925020 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.925062 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.925086 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:39Z","lastTransitionTime":"2026-01-05T21:51:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
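
One way to confirm what the webhook endpoint named in these errors (https://127.0.0.1:9743) is actually serving is to complete a handshake with verification disabled and print the peer certificate's validity window. A sketch along those lines, assuming the endpoint from the log is reachable from the node:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func main() {
	// InsecureSkipVerify lets the handshake finish even though the
	// certificate is expired, so the presented chain can be inspected.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	for i, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("cert[%d] subject=%s notBefore=%s notAfter=%s\n",
			i, cert.Subject, cert.NotBefore, cert.NotAfter)
	}
}
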
Has your network provider started?"} Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.954938 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerStarted","Data":"91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65"} Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.955897 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.961989 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" event={"ID":"474e7e8c-c9f5-4f54-81c9-0976bcc6565d","Type":"ContainerStarted","Data":"3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3"} Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.990300 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:39 crc kubenswrapper[4910]: I0105 21:51:39.994342 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91e79834c0a89fe97ce3ce47a557696cac36a47f
c7d2dbe2f7ca2f6249de7f65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.029325 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.029401 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.029429 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.029462 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.029486 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.031286 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.052532 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.068602 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.091021 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.110831 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.126953 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.132056 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.132097 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.132112 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.132150 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.132167 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.145272 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.168218 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.185920 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.201895 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.213886 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.229402 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.234832 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.234906 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.234925 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.234955 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.234975 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.246729 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.264743 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.287312 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.307685 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.314076 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.314149 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.314163 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.314185 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.314201 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.328671 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: E0105 21:51:40.335818 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.341078 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.341143 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.341157 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.341173 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.341186 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.354414 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: E0105 21:51:40.360758 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"1
3985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.366177 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.366239 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.366256 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.366280 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.366302 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.379714 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: E0105 21:51:40.386718 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.391762 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.391800 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.391825 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.391845 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.391857 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.401271 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: E0105 21:51:40.413321 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.419488 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.419551 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.419569 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.419595 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.419617 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.423029 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: E0105 21:51:40.441697 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: E0105 21:51:40.441906 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.444522 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.444573 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.444594 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.444622 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.444640 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.447912 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.466506 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.488663 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.512379 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.526588 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.540618 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.546920 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.546961 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.546971 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.546987 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.546997 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.555917 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.576371 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"nam
e\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:40Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.650806 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.650864 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.650875 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: 
I0105 21:51:40.650896 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.650908 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.720748 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.720788 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:40 crc kubenswrapper[4910]: E0105 21:51:40.720940 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.721000 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:40 crc kubenswrapper[4910]: E0105 21:51:40.721110 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:40 crc kubenswrapper[4910]: E0105 21:51:40.721214 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.753270 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.753323 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.753336 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.753362 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.753377 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.856433 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.856485 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.856497 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.856516 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.856528 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.960010 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.960067 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.960081 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.960104 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.960120 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:40Z","lastTransitionTime":"2026-01-05T21:51:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.964304 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.964719 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:40 crc kubenswrapper[4910]: I0105 21:51:40.989211 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.002721 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.013056 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.027480 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.046607 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.064373 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.064455 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.064481 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.064512 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.064541 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:41Z","lastTransitionTime":"2026-01-05T21:51:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.067405 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.086647 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.090082 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.106559 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.125763 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.140567 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.156209 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.167021 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.167059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.167070 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.167090 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.167102 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:41Z","lastTransitionTime":"2026-01-05T21:51:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.177576 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.213235 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.236938 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.267951 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.270572 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.270624 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.270635 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.270728 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.270746 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:41Z","lastTransitionTime":"2026-01-05T21:51:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.282261 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.373070 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.373327 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.373455 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.373618 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.373748 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:41Z","lastTransitionTime":"2026-01-05T21:51:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.476483 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.476758 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.476863 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.476969 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.477258 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:41Z","lastTransitionTime":"2026-01-05T21:51:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.580292 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.580333 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.580343 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.580358 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.580372 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:41Z","lastTransitionTime":"2026-01-05T21:51:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.683292 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.683351 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.683363 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.683384 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.683397 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:41Z","lastTransitionTime":"2026-01-05T21:51:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.786441 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.786475 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.786488 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.786503 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.786514 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:41Z","lastTransitionTime":"2026-01-05T21:51:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.889783 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.889824 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.889834 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.889857 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.889867 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:41Z","lastTransitionTime":"2026-01-05T21:51:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.969409 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/0.log" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.973039 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerID="91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65" exitCode=1 Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.973107 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65"} Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.974010 4910 scope.go:117] "RemoveContainer" containerID="91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.992731 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.992792 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.992809 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.992830 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.992869 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:41Z","lastTransitionTime":"2026-01-05T21:51:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:41 crc kubenswrapper[4910]: I0105 21:51:41.996714 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:41Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.016411 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.030512 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.045768 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.059376 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.082518 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.095884 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.095937 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.095954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.095979 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.095995 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:42Z","lastTransitionTime":"2026-01-05T21:51:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.111095 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.152069 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:41Z\\\",\\\"message\\\":\\\" 6226 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.330509 6226 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.331048 6226 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:41.331070 6226 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:41.331084 6226 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:51:41.331089 6226 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:51:41.331142 6226 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:41.331158 6226 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:51:41.331175 6226 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:41.331192 6226 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:41.331196 6226 factory.go:656] Stopping watch factory\\\\nI0105 21:51:41.331197 6226 handler.go:208] Removed *v1.Node event handler 7\\\\nI0105 21:51:41.331211 6226 ovnkube.go:599] Stopped ovnkube\\\\nI0105 21:51:41.331214 6226 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:41.331218 6226 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 
21:51:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.177237 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.191539 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.197876 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.197903 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.197912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.197927 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.197936 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:42Z","lastTransitionTime":"2026-01-05T21:51:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.207161 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.222251 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.235886 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.248178 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.263968 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.300641 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.300706 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.300719 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.300737 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.300746 4910 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:42Z","lastTransitionTime":"2026-01-05T21:51:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.403888 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.403944 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.403959 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.403984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.404000 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:42Z","lastTransitionTime":"2026-01-05T21:51:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.506508 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.506552 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.506562 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.506579 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.506590 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:42Z","lastTransitionTime":"2026-01-05T21:51:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.544987 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.545168 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.545249 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:51:58.545218263 +0000 UTC m=+50.122715953 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.545340 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.545354 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.545406 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.545455 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.545471 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.545477 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" 
not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.545488 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.545492 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.545503 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.545601 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.545406 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.545541 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:58.54552281 +0000 UTC m=+50.123020470 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.545674 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:58.545662923 +0000 UTC m=+50.123160603 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.545690 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-01-05 21:51:58.545681354 +0000 UTC m=+50.123179044 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.545707 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:51:58.545699874 +0000 UTC m=+50.123197554 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.609677 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.609727 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.609736 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.609753 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.609765 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:42Z","lastTransitionTime":"2026-01-05T21:51:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.712668 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.712709 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.712718 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.712733 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.712743 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:42Z","lastTransitionTime":"2026-01-05T21:51:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.721149 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.721180 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.721247 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.721274 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.721381 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:42 crc kubenswrapper[4910]: E0105 21:51:42.721472 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.814774 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.814813 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.814824 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.814840 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.814850 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:42Z","lastTransitionTime":"2026-01-05T21:51:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.917397 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.917462 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.917477 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.917498 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.917512 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:42Z","lastTransitionTime":"2026-01-05T21:51:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.978661 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/0.log" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.982371 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerStarted","Data":"8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55"} Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.982912 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:42 crc kubenswrapper[4910]: I0105 21:51:42.998318 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.014964 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.020321 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.020361 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.020380 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.020401 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.020415 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:43Z","lastTransitionTime":"2026-01-05T21:51:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.036247 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.051049 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.063405 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.087595 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.101188 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.114819 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.122814 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.122861 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.122875 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.122897 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.122909 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:43Z","lastTransitionTime":"2026-01-05T21:51:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.136331 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.163645 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:41Z\\\",\\\"message\\\":\\\" 6226 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.330509 6226 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.331048 6226 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:41.331070 6226 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:41.331084 6226 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:51:41.331089 6226 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:51:41.331142 6226 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:41.331158 6226 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:51:41.331175 6226 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:41.331192 6226 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:41.331196 6226 factory.go:656] Stopping watch factory\\\\nI0105 21:51:41.331197 6226 handler.go:208] Removed *v1.Node event handler 7\\\\nI0105 21:51:41.331211 6226 ovnkube.go:599] Stopped ovnkube\\\\nI0105 21:51:41.331214 6226 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:41.331218 6226 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 
21:51:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.180486 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.191797 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.211590 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-ap
iserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.225976 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.226012 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.226025 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.226045 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.226064 4910 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:43Z","lastTransitionTime":"2026-01-05T21:51:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.226880 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.241278 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:43Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.329434 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.329488 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.329503 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.329527 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.329541 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:43Z","lastTransitionTime":"2026-01-05T21:51:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.433150 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.433217 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.433236 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.433263 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.433282 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:43Z","lastTransitionTime":"2026-01-05T21:51:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.535622 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.535673 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.535687 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.535707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.535717 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:43Z","lastTransitionTime":"2026-01-05T21:51:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.639347 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.639391 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.639403 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.639420 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.639432 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:43Z","lastTransitionTime":"2026-01-05T21:51:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.743175 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.743243 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.743265 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.743291 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.743312 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:43Z","lastTransitionTime":"2026-01-05T21:51:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.847535 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.847624 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.847648 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.847690 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.847716 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:43Z","lastTransitionTime":"2026-01-05T21:51:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.950600 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.950687 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.950711 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.950745 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:43 crc kubenswrapper[4910]: I0105 21:51:43.950765 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:43Z","lastTransitionTime":"2026-01-05T21:51:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.054396 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.054465 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.054484 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.054511 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.054535 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:44Z","lastTransitionTime":"2026-01-05T21:51:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.157692 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.157787 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.157811 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.157847 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.157871 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:44Z","lastTransitionTime":"2026-01-05T21:51:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.261076 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.261204 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.261231 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.261270 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.261294 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:44Z","lastTransitionTime":"2026-01-05T21:51:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.364387 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.364452 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.364471 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.364509 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.364535 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:44Z","lastTransitionTime":"2026-01-05T21:51:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.468507 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.468572 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.468589 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.468614 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.468634 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:44Z","lastTransitionTime":"2026-01-05T21:51:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.572074 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.572176 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.572194 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.572224 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.572243 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:44Z","lastTransitionTime":"2026-01-05T21:51:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.579201 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4"] Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.580180 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.584374 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.584628 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.601558 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.631219 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.654928 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.670820 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-t58h4\" (UID: \"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.670970 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-t58h4\" (UID: \"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.671017 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-t58h4\" (UID: \"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.671060 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpbhj\" (UniqueName: \"kubernetes.io/projected/9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e-kube-api-access-dpbhj\") pod \"ovnkube-control-plane-749d76644c-t58h4\" (UID: \"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.675781 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.675931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.676015 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.676164 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.676287 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:44Z","lastTransitionTime":"2026-01-05T21:51:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.679941 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.704383 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.721331 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.721703 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.721781 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:44 crc kubenswrapper[4910]: E0105 21:51:44.721949 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.721966 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:44 crc kubenswrapper[4910]: E0105 21:51:44.722098 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:44 crc kubenswrapper[4910]: E0105 21:51:44.722231 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.743375 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e
6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.767904 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.771758 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-t58h4\" (UID: \"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.771821 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpbhj\" (UniqueName: \"kubernetes.io/projected/9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e-kube-api-access-dpbhj\") pod \"ovnkube-control-plane-749d76644c-t58h4\" (UID: \"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.771923 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-t58h4\" (UID: \"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.772017 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-t58h4\" (UID: \"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.773159 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-t58h4\" (UID: \"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.773490 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-t58h4\" (UID: \"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.780626 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.780686 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.780710 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.780744 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.780769 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:44Z","lastTransitionTime":"2026-01-05T21:51:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.782499 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-t58h4\" (UID: \"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.796605 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.811091 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpbhj\" (UniqueName: \"kubernetes.io/projected/9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e-kube-api-access-dpbhj\") pod \"ovnkube-control-plane-749d76644c-t58h4\" (UID: \"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.830481 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:41Z\\\",\\\"message\\\":\\\" 6226 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.330509 6226 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.331048 6226 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:41.331070 6226 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:41.331084 6226 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:51:41.331089 6226 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:51:41.331142 6226 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:41.331158 6226 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:51:41.331175 6226 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:41.331192 6226 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:41.331196 6226 factory.go:656] Stopping watch factory\\\\nI0105 21:51:41.331197 6226 handler.go:208] Removed *v1.Node event handler 7\\\\nI0105 21:51:41.331211 6226 ovnkube.go:599] Stopped ovnkube\\\\nI0105 21:51:41.331214 6226 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:41.331218 6226 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 
21:51:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.862949 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.883483 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.887174 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.887252 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.887276 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.887309 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.887332 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:44Z","lastTransitionTime":"2026-01-05T21:51:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.893710 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.900966 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.903993 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" Jan 05 21:51:44 crc kubenswrapper[4910]: W0105 21:51:44.924677 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e4e2a14_7b3c_4fbe_8ec5_11428c5a0d5e.slice/crio-61792ff840a18036000f5118f66bb4881a92cc7b2c5de0d3078bc840f3bc52e8 WatchSource:0}: Error finding container 61792ff840a18036000f5118f66bb4881a92cc7b2c5de0d3078bc840f3bc52e8: Status 404 returned error can't find the container with id 61792ff840a18036000f5118f66bb4881a92cc7b2c5de0d3078bc840f3bc52e8 Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.925914 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"
volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\
\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.944942 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-cert
s\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.959492 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":t
rue,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.976660 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.989346 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" event={"ID":"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e","Type":"ContainerStarted","Data":"61792ff840a18036000f5118f66bb4881a92cc7b2c5de0d3078bc840f3bc52e8"} Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.989389 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.989462 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.989483 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.989513 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.989534 4910 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:44Z","lastTransitionTime":"2026-01-05T21:51:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.991060 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/1.log" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.991782 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/0.log" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.994345 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\"
,\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:44Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.995551 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerID="8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55" exitCode=1 Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.995614 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55"} Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.995782 4910 scope.go:117] "RemoveContainer" containerID="91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65" 
Jan 05 21:51:44 crc kubenswrapper[4910]: I0105 21:51:44.996724 4910 scope.go:117] "RemoveContainer" containerID="8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55" Jan 05 21:51:44 crc kubenswrapper[4910]: E0105 21:51:44.996884 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.010113 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.023605 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.036271 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.047482 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.060587 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.071797 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.085801 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.092135 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.092172 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.092182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.092199 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.092208 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:45Z","lastTransitionTime":"2026-01-05T21:51:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.104815 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a
24d3b909acac62b81ba70a55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:41Z\\\",\\\"message\\\":\\\" 6226 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.330509 6226 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.331048 6226 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:41.331070 6226 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:41.331084 6226 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:51:41.331089 6226 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:51:41.331142 6226 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:41.331158 6226 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:51:41.331175 6226 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:41.331192 6226 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:41.331196 6226 factory.go:656] Stopping watch factory\\\\nI0105 21:51:41.331197 6226 handler.go:208] Removed *v1.Node event handler 7\\\\nI0105 21:51:41.331211 6226 ovnkube.go:599] Stopped ovnkube\\\\nI0105 21:51:41.331214 6226 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:41.331218 6226 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 
21:51:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.129557 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.143162 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.156890 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.172682 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.189157 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\
\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.195640 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.195686 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.195727 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.195771 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.195783 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:45Z","lastTransitionTime":"2026-01-05T21:51:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.205464 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.234254 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state
\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.248363 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.264177 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.280139 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.298304 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.298631 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.298658 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.298680 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.298698 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:45Z","lastTransitionTime":"2026-01-05T21:51:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.303834 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:41Z\\\",\\\"message\\\":\\\" 6226 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.330509 6226 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.331048 6226 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:41.331070 6226 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:41.331084 6226 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:51:41.331089 6226 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:51:41.331142 6226 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:41.331158 6226 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:51:41.331175 6226 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:41.331192 6226 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:41.331196 6226 factory.go:656] Stopping watch factory\\\\nI0105 21:51:41.331197 6226 handler.go:208] Removed *v1.Node event handler 7\\\\nI0105 21:51:41.331211 6226 ovnkube.go:599] Stopped ovnkube\\\\nI0105 21:51:41.331214 6226 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:41.331218 6226 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 
21:51:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"l\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843082 6355 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843123 6355 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0105 21:51:42.843172 6355 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.319575 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni
/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.332963 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.345831 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710
392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.359957 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.373690 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.384536 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.400939 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:45 crc 
kubenswrapper[4910]: I0105 21:51:45.400991 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.401004 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.401025 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.401039 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:45Z","lastTransitionTime":"2026-01-05T21:51:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.402341 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.417552 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026
-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.434158 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.447548 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.458811 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:45Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.503925 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.503970 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.503979 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.503994 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.504006 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:45Z","lastTransitionTime":"2026-01-05T21:51:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.606684    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.606729    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.606738    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.606756    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.606768    4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:45Z","lastTransitionTime":"2026-01-05T21:51:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.708973    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.709044    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.709062    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.709085    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.709098    4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:45Z","lastTransitionTime":"2026-01-05T21:51:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.811817    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.811871    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.811880    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.811900    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.811910    4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:45Z","lastTransitionTime":"2026-01-05T21:51:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.914901    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.914973    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.914991    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.915019    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:45 crc kubenswrapper[4910]: I0105 21:51:45.915039    4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:45Z","lastTransitionTime":"2026-01-05T21:51:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.003338    4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" event={"ID":"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e","Type":"ContainerStarted","Data":"ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d"}
Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.003406    4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" event={"ID":"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e","Type":"ContainerStarted","Data":"638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48"}
Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.006304    4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/1.log"
Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.017549    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.017589    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.017607    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.017633    4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.017651    4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:46Z","lastTransitionTime":"2026-01-05T21:51:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.021831 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.039959 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.058794 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.077267 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-mns6n"] Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.078876 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:51:46 crc kubenswrapper[4910]: E0105 21:51:46.078999 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.087460 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a
24d3b909acac62b81ba70a55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:41Z\\\",\\\"message\\\":\\\" 6226 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.330509 6226 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.331048 6226 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:41.331070 6226 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:41.331084 6226 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:51:41.331089 6226 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:51:41.331142 6226 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:41.331158 6226 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:51:41.331175 6226 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:41.331192 6226 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:41.331196 6226 factory.go:656] Stopping watch factory\\\\nI0105 21:51:41.331197 6226 handler.go:208] Removed *v1.Node event handler 7\\\\nI0105 21:51:41.331211 6226 ovnkube.go:599] Stopped ovnkube\\\\nI0105 21:51:41.331214 6226 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:41.331218 6226 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 21:51:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"l\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843082 6355 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843123 6355 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0105 21:51:42.843172 6355 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy 
controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\
\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.110971 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.121339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.121385 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.121395 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.121414 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.121426 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:46Z","lastTransitionTime":"2026-01-05T21:51:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.131527 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.148774 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.164018 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.177724 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.187235 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.187329 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rr4qc\" (UniqueName: \"kubernetes.io/projected/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-kube-api-access-rr4qc\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.190205 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.206364 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8
e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.220026 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.224313 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.224357 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.224370 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.224403 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.224418 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:46Z","lastTransitionTime":"2026-01-05T21:51:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.236737 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.250830 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.265552 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.276244 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.288918 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.289013 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rr4qc\" (UniqueName: \"kubernetes.io/projected/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-kube-api-access-rr4qc\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:51:46 crc kubenswrapper[4910]: E0105 21:51:46.289420 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 05 
21:51:46 crc kubenswrapper[4910]: E0105 21:51:46.289540 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs podName:74c455b1-4706-4ca7-bd82-2b99c3c83e3f nodeName:}" failed. No retries permitted until 2026-01-05 21:51:46.789508942 +0000 UTC m=+38.367006622 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs") pod "network-metrics-daemon-mns6n" (UID: "74c455b1-4706-4ca7-bd82-2b99c3c83e3f") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.292946 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/mul
tus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.304992 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.316360 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rr4qc\" (UniqueName: \"kubernetes.io/projected/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-kube-api-access-rr4qc\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.323139 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.327155 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.327190 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.327202 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.327219 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.327231 4910 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:46Z","lastTransitionTime":"2026-01-05T21:51:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.336325 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.349549 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.364572 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 
21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.376985 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.391389 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.405477 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.419376 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.430157 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.430217 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.430229 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.430249 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.430260 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:46Z","lastTransitionTime":"2026-01-05T21:51:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.435065 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc 
kubenswrapper[4910]: I0105 21:51:46.449440 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.477803 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.492626 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.509170 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.528956 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.533681 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.533752 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.533811 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.533842 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.533864 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:46Z","lastTransitionTime":"2026-01-05T21:51:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.553437 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:41Z\\\",\\\"message\\\":\\\" 6226 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.330509 6226 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.331048 6226 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:41.331070 6226 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:41.331084 6226 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:51:41.331089 6226 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:51:41.331142 6226 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:41.331158 6226 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:51:41.331175 6226 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:41.331192 6226 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:41.331196 6226 factory.go:656] Stopping watch factory\\\\nI0105 21:51:41.331197 6226 handler.go:208] Removed *v1.Node event handler 7\\\\nI0105 21:51:41.331211 6226 ovnkube.go:599] Stopped ovnkube\\\\nI0105 21:51:41.331214 6226 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:41.331218 6226 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 
21:51:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"l\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843082 6355 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843123 6355 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0105 21:51:42.843172 6355 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:46Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.637480 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.637575 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.637601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.637634 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.637657 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:46Z","lastTransitionTime":"2026-01-05T21:51:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.721327 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.721402 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:46 crc kubenswrapper[4910]: E0105 21:51:46.721723 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:46 crc kubenswrapper[4910]: E0105 21:51:46.721900 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.721432 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:46 crc kubenswrapper[4910]: E0105 21:51:46.722025 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.740179 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.740215 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.740232 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.740251 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.740265 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:46Z","lastTransitionTime":"2026-01-05T21:51:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.796172 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:51:46 crc kubenswrapper[4910]: E0105 21:51:46.796413 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 05 21:51:46 crc kubenswrapper[4910]: E0105 21:51:46.796524 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs podName:74c455b1-4706-4ca7-bd82-2b99c3c83e3f nodeName:}" failed. No retries permitted until 2026-01-05 21:51:47.796497439 +0000 UTC m=+39.373995319 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs") pod "network-metrics-daemon-mns6n" (UID: "74c455b1-4706-4ca7-bd82-2b99c3c83e3f") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.842900 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.842981 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.843003 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.843030 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.843051 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:46Z","lastTransitionTime":"2026-01-05T21:51:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.946164 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.946211 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.946223 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.946245 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:46 crc kubenswrapper[4910]: I0105 21:51:46.946262 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:46Z","lastTransitionTime":"2026-01-05T21:51:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.048996 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.049356 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.049625 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.049818 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.049991 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:47Z","lastTransitionTime":"2026-01-05T21:51:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.153113 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.153203 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.153223 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.153253 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.153279 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:47Z","lastTransitionTime":"2026-01-05T21:51:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.255702 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.255965 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.256028 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.256142 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.256215 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:47Z","lastTransitionTime":"2026-01-05T21:51:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.359980 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.360278 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.360344 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.360414 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.360484 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:47Z","lastTransitionTime":"2026-01-05T21:51:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.464309 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.464375 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.464399 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.464431 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.464452 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:47Z","lastTransitionTime":"2026-01-05T21:51:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.567495 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.567574 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.567594 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.567623 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.567642 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:47Z","lastTransitionTime":"2026-01-05T21:51:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.671161 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.671234 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.671261 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.671299 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.671320 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:47Z","lastTransitionTime":"2026-01-05T21:51:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.720923 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:51:47 crc kubenswrapper[4910]: E0105 21:51:47.721186 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.775228 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.775281 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.775296 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.775319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.775339 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:47Z","lastTransitionTime":"2026-01-05T21:51:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.807939 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:51:47 crc kubenswrapper[4910]: E0105 21:51:47.808269 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 05 21:51:47 crc kubenswrapper[4910]: E0105 21:51:47.808361 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs podName:74c455b1-4706-4ca7-bd82-2b99c3c83e3f nodeName:}" failed. No retries permitted until 2026-01-05 21:51:49.808334431 +0000 UTC m=+41.385832141 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs") pod "network-metrics-daemon-mns6n" (UID: "74c455b1-4706-4ca7-bd82-2b99c3c83e3f") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.878756 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.878830 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.878856 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.878886 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.878907 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:47Z","lastTransitionTime":"2026-01-05T21:51:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.981886 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.982370 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.982497 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.982587 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:47 crc kubenswrapper[4910]: I0105 21:51:47.982705 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:47Z","lastTransitionTime":"2026-01-05T21:51:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.100630 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.100669 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.100681 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.100702 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.100716 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:48Z","lastTransitionTime":"2026-01-05T21:51:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.203627 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.203680 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.203693 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.203711 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.203724 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:48Z","lastTransitionTime":"2026-01-05T21:51:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.307219 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.307271 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.307284 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.307304 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.307320 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:48Z","lastTransitionTime":"2026-01-05T21:51:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.410658 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.410731 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.410744 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.410770 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.410784 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:48Z","lastTransitionTime":"2026-01-05T21:51:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.513988 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.514047 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.514060 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.514081 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.514094 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:48Z","lastTransitionTime":"2026-01-05T21:51:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.617558 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.617606 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.617619 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.617637 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.617649 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:48Z","lastTransitionTime":"2026-01-05T21:51:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.720878 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.721071 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.721415 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:48 crc kubenswrapper[4910]: E0105 21:51:48.721389 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.721546 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.721584 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.721601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.721624 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.721645 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:48Z","lastTransitionTime":"2026-01-05T21:51:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:48 crc kubenswrapper[4910]: E0105 21:51:48.721686 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:48 crc kubenswrapper[4910]: E0105 21:51:48.721834 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.745199 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.768891 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.802356 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a
24d3b909acac62b81ba70a55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:41Z\\\",\\\"message\\\":\\\" 6226 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.330509 6226 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.331048 6226 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:41.331070 6226 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:41.331084 6226 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:51:41.331089 6226 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:51:41.331142 6226 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:41.331158 6226 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:51:41.331175 6226 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:41.331192 6226 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:41.331196 6226 factory.go:656] Stopping watch factory\\\\nI0105 21:51:41.331197 6226 handler.go:208] Removed *v1.Node event handler 7\\\\nI0105 21:51:41.331211 6226 ovnkube.go:599] Stopped ovnkube\\\\nI0105 21:51:41.331214 6226 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:41.331218 6226 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 21:51:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"l\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843082 6355 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843123 6355 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0105 21:51:42.843172 6355 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy 
controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\
\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.824538 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.824639 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.824661 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.824688 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.824748 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:48Z","lastTransitionTime":"2026-01-05T21:51:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.828598 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.856928 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.874781 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.894157 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.911544 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-
o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.929343 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.929409 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.929426 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.929459 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.929480 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:48Z","lastTransitionTime":"2026-01-05T21:51:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.930668 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.950075 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.966210 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:48Z is after 2025-08-24T17:21:41Z" Jan 05 
21:51:48 crc kubenswrapper[4910]: I0105 21:51:48.987194 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.004178 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:49Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.021107 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:49Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.031873 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.031918 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.031932 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.031953 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.031967 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:49Z","lastTransitionTime":"2026-01-05T21:51:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.035179 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursive
ReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:49Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.048163 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:49Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.065576 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e
8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:49Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.134750 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.134815 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.134833 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.134879 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.134898 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:49Z","lastTransitionTime":"2026-01-05T21:51:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.238236 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.238304 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.238325 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.238394 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.238420 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:49Z","lastTransitionTime":"2026-01-05T21:51:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.341660 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.341737 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.341757 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.341786 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.341806 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:49Z","lastTransitionTime":"2026-01-05T21:51:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.446704 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.446786 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.446807 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.446837 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.446868 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:49Z","lastTransitionTime":"2026-01-05T21:51:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.549168 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.549238 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.549249 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.549266 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.549280 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:49Z","lastTransitionTime":"2026-01-05T21:51:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.652065 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.652132 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.652141 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.652161 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.652172 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:49Z","lastTransitionTime":"2026-01-05T21:51:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.720874 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:51:49 crc kubenswrapper[4910]: E0105 21:51:49.721034 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.754541 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.754572 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.754582 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.754597 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.754608 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:49Z","lastTransitionTime":"2026-01-05T21:51:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.857616 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.857693 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.857711 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.857740 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.857766 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:49Z","lastTransitionTime":"2026-01-05T21:51:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.894481 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:51:49 crc kubenswrapper[4910]: E0105 21:51:49.894767 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 05 21:51:49 crc kubenswrapper[4910]: E0105 21:51:49.894936 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs podName:74c455b1-4706-4ca7-bd82-2b99c3c83e3f nodeName:}" failed. No retries permitted until 2026-01-05 21:51:53.894893608 +0000 UTC m=+45.472391318 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs") pod "network-metrics-daemon-mns6n" (UID: "74c455b1-4706-4ca7-bd82-2b99c3c83e3f") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.961984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.962051 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.962075 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.962148 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:49 crc kubenswrapper[4910]: I0105 21:51:49.962200 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:49Z","lastTransitionTime":"2026-01-05T21:51:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.065448 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.065506 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.065518 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.065539 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.065551 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.169081 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.169154 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.169166 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.169186 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.169200 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.272650 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.272704 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.272712 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.272730 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.272742 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.375973 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.376046 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.376063 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.376093 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.376113 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.478778 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.478838 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.478857 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.478881 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.478901 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.581616 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.581652 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.581661 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.581678 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.581689 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.684506 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.684554 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.684565 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.684583 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.684593 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.704590 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.704655 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.704673 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.704697 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.704715 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.721065 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:50 crc kubenswrapper[4910]: E0105 21:51:50.721233 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.721065 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.721311 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:50 crc kubenswrapper[4910]: E0105 21:51:50.721337 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:50 crc kubenswrapper[4910]: E0105 21:51:50.721431 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:50 crc kubenswrapper[4910]: E0105 21:51:50.726141 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:50Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.730153 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.730185 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.730196 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.730213 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.730224 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: E0105 21:51:50.744752 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:50Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.748856 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.748900 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.748912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.748936 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.748950 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: E0105 21:51:50.762918 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:50Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.766586 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.766633 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.766651 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.766680 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.766698 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: E0105 21:51:50.783815 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:50Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.788732 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.788781 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.788790 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.788810 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.788825 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: E0105 21:51:50.809047 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:50Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:50 crc kubenswrapper[4910]: E0105 21:51:50.809306 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.811644 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.811711 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.811729 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.811755 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.811773 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.914769 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.914857 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.914885 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.914920 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:50 crc kubenswrapper[4910]: I0105 21:51:50.914942 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:50Z","lastTransitionTime":"2026-01-05T21:51:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.018107 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.018219 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.018239 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.018269 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.018287 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:51Z","lastTransitionTime":"2026-01-05T21:51:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.121828 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.121894 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.121906 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.121931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.121944 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:51Z","lastTransitionTime":"2026-01-05T21:51:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.224791 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.224830 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.224840 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.224858 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.224869 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:51Z","lastTransitionTime":"2026-01-05T21:51:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.327700 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.327776 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.327797 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.327831 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.327855 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:51Z","lastTransitionTime":"2026-01-05T21:51:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.430756 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.430835 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.430853 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.430877 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.430892 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:51Z","lastTransitionTime":"2026-01-05T21:51:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.534156 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.534216 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.534234 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.534260 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.534278 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:51Z","lastTransitionTime":"2026-01-05T21:51:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.637575 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.637631 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.637650 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.637676 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.637694 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:51Z","lastTransitionTime":"2026-01-05T21:51:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.721221 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:51:51 crc kubenswrapper[4910]: E0105 21:51:51.721475 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.740110 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.740215 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.740238 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.740266 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.740284 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:51Z","lastTransitionTime":"2026-01-05T21:51:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.842561 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.842601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.842619 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.842640 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.842657 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:51Z","lastTransitionTime":"2026-01-05T21:51:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.945888 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.945940 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.945959 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.945987 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:51 crc kubenswrapper[4910]: I0105 21:51:51.946008 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:51Z","lastTransitionTime":"2026-01-05T21:51:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.048608 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.048670 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.048684 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.048702 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.048714 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:52Z","lastTransitionTime":"2026-01-05T21:51:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.151902 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.151967 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.151995 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.152021 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.152042 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:52Z","lastTransitionTime":"2026-01-05T21:51:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.255109 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.255220 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.255256 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.255292 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.255319 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:52Z","lastTransitionTime":"2026-01-05T21:51:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.359531 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.359590 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.359600 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.359620 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.359634 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:52Z","lastTransitionTime":"2026-01-05T21:51:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.462586 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.462655 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.462672 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.462702 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.462723 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:52Z","lastTransitionTime":"2026-01-05T21:51:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.565741 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.565816 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.565835 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.565865 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.565884 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:52Z","lastTransitionTime":"2026-01-05T21:51:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.668697 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.668764 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.668784 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.668812 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.668831 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:52Z","lastTransitionTime":"2026-01-05T21:51:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.721569 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.721635 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 05 21:51:52 crc kubenswrapper[4910]: E0105 21:51:52.722071 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.721636 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 05 21:51:52 crc kubenswrapper[4910]: E0105 21:51:52.722519 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 05 21:51:52 crc kubenswrapper[4910]: E0105 21:51:52.722611 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.771888 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.771927 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.771936 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.771951 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.771963 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:52Z","lastTransitionTime":"2026-01-05T21:51:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.876206 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.876391 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.876415 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.876442 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.876468 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:52Z","lastTransitionTime":"2026-01-05T21:51:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.979533 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.979577 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.979601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.979620 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:52 crc kubenswrapper[4910]: I0105 21:51:52.979630 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:52Z","lastTransitionTime":"2026-01-05T21:51:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.083929 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.083990 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.084003 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.084023 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.084039 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:53Z","lastTransitionTime":"2026-01-05T21:51:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.187171 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.187261 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.187288 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.187326 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.187350 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:53Z","lastTransitionTime":"2026-01-05T21:51:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.290096 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.290164 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.290174 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.290192 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.290205 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:53Z","lastTransitionTime":"2026-01-05T21:51:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.393204 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.393277 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.393303 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.393335 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.393356 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:53Z","lastTransitionTime":"2026-01-05T21:51:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.496912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.496995 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.497015 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.497049 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.497071 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:53Z","lastTransitionTime":"2026-01-05T21:51:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.600979 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.601051 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.601071 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.601103 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.601154 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:53Z","lastTransitionTime":"2026-01-05T21:51:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.704863 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.704925 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.704938 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.704962 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.704977 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:53Z","lastTransitionTime":"2026-01-05T21:51:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.722290 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n"
Jan 05 21:51:53 crc kubenswrapper[4910]: E0105 21:51:53.722579 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.808338 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.808413 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.808435 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.808467 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.808487 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:53Z","lastTransitionTime":"2026-01-05T21:51:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.911829 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.911885 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.911896 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.911917 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.911929 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:53Z","lastTransitionTime":"2026-01-05T21:51:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:53 crc kubenswrapper[4910]: I0105 21:51:53.943790 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n"
Jan 05 21:51:53 crc kubenswrapper[4910]: E0105 21:51:53.943999 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 05 21:51:53 crc kubenswrapper[4910]: E0105 21:51:53.944107 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs podName:74c455b1-4706-4ca7-bd82-2b99c3c83e3f nodeName:}" failed. No retries permitted until 2026-01-05 21:52:01.944082167 +0000 UTC m=+53.521579837 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs") pod "network-metrics-daemon-mns6n" (UID: "74c455b1-4706-4ca7-bd82-2b99c3c83e3f") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.020609 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.020957 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.021098 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.021247 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.021409 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:54Z","lastTransitionTime":"2026-01-05T21:51:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.125355 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.125444 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.125461 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.125487 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.125505 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:54Z","lastTransitionTime":"2026-01-05T21:51:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.228660 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.229115 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.229385 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.229586 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.229758 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:54Z","lastTransitionTime":"2026-01-05T21:51:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.334177 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.334228 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.334240 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.334257 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.334268 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:54Z","lastTransitionTime":"2026-01-05T21:51:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.438043 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.438176 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.438206 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.438240 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.438265 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:54Z","lastTransitionTime":"2026-01-05T21:51:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.541239 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.541276 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.541285 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.541302 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.541312 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:54Z","lastTransitionTime":"2026-01-05T21:51:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.644857 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.644917 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.644932 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.644954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.644967 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:54Z","lastTransitionTime":"2026-01-05T21:51:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.721498 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.721523 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 05 21:51:54 crc kubenswrapper[4910]: E0105 21:51:54.721706 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 05 21:51:54 crc kubenswrapper[4910]: E0105 21:51:54.721824 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.721983 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 05 21:51:54 crc kubenswrapper[4910]: E0105 21:51:54.722076 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.749232 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.749301 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.749319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.749349 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.749401 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:54Z","lastTransitionTime":"2026-01-05T21:51:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.841572 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.852824 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.852865 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.852873 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.852888 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.852897 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:54Z","lastTransitionTime":"2026-01-05T21:51:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.855490 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.861919 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:54Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.884508 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:54Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.909465 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\
\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:54Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.934824 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:54Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.955866 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.955922 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.955946 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.955976 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.955994 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:54Z","lastTransitionTime":"2026-01-05T21:51:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.957086 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:54Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.975741 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:54Z is after 2025-08-24T17:21:41Z" Jan 05 
21:51:54 crc kubenswrapper[4910]: I0105 21:51:54.993337 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:54Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.007745 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:55Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.022051 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:55Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.035730 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:55Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.050756 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:55Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.058495 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.058707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.058892 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.059088 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.059292 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:55Z","lastTransitionTime":"2026-01-05T21:51:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.085195 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:55Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.103939 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:55Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.124684 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:55Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.147403 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:55Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.169609 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.169694 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.169715 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.169746 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.169766 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:55Z","lastTransitionTime":"2026-01-05T21:51:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.174507 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://91e79834c0a89fe97ce3ce47a557696cac36a47fc7d2dbe2f7ca2f6249de7f65\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:41Z\\\",\\\"message\\\":\\\" 6226 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.330509 6226 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:41.331048 6226 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:41.331070 6226 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:41.331084 6226 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:51:41.331089 6226 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:51:41.331142 6226 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:41.331158 6226 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:51:41.331175 6226 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:41.331192 6226 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:41.331196 6226 factory.go:656] Stopping watch factory\\\\nI0105 21:51:41.331197 6226 handler.go:208] Removed *v1.Node event handler 7\\\\nI0105 21:51:41.331211 6226 ovnkube.go:599] Stopped ovnkube\\\\nI0105 21:51:41.331214 6226 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:41.331218 6226 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 
21:51:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"l\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843082 6355 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843123 6355 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0105 21:51:42.843172 6355 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:55Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.188902 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:55Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.273482 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.273557 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.273575 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.273601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.273617 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:55Z","lastTransitionTime":"2026-01-05T21:51:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.377160 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.377224 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.377242 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.377269 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.377289 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:55Z","lastTransitionTime":"2026-01-05T21:51:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.480430 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.480494 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.480512 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.480538 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.480558 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:55Z","lastTransitionTime":"2026-01-05T21:51:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.583691 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.583783 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.583801 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.583828 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.583847 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:55Z","lastTransitionTime":"2026-01-05T21:51:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.687693 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.687755 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.687774 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.687802 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.687820 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:55Z","lastTransitionTime":"2026-01-05T21:51:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.721367 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n"
Jan 05 21:51:55 crc kubenswrapper[4910]: E0105 21:51:55.721667 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.790521 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.790615 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.790639 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.790671 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.790694 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:55Z","lastTransitionTime":"2026-01-05T21:51:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.894857 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.894949 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.894971 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.895007 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.895027 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:55Z","lastTransitionTime":"2026-01-05T21:51:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.997704 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.997802 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.997822 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.997851 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:55 crc kubenswrapper[4910]: I0105 21:51:55.997873 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:55Z","lastTransitionTime":"2026-01-05T21:51:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.100102 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.100200 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.100217 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.100242 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.100260 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:56Z","lastTransitionTime":"2026-01-05T21:51:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.202589 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.202636 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.202649 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.202668 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.202681 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:56Z","lastTransitionTime":"2026-01-05T21:51:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.306717 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.306786 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.306803 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.306834 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.306853 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:56Z","lastTransitionTime":"2026-01-05T21:51:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.410222 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.410279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.410301 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.410358 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.410383 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:56Z","lastTransitionTime":"2026-01-05T21:51:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.514546 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.515702 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.515855 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.516031 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.516216 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:56Z","lastTransitionTime":"2026-01-05T21:51:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.619193 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.619260 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.619274 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.619300 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.619317 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:56Z","lastTransitionTime":"2026-01-05T21:51:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.720711 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.720803 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 05 21:51:56 crc kubenswrapper[4910]: E0105 21:51:56.720862 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.720865 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 05 21:51:56 crc kubenswrapper[4910]: E0105 21:51:56.720974 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 05 21:51:56 crc kubenswrapper[4910]: E0105 21:51:56.721071 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.721960 4910 scope.go:117] "RemoveContainer" containerID="8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.723953 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.724054 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.724072 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.724096 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.724152 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:56Z","lastTransitionTime":"2026-01-05T21:51:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.746562 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:56Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.764112 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:56Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.789443 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f894
5c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:56Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.809830 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:56Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.827838 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.827921 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.827943 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.827973 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.827997 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:56Z","lastTransitionTime":"2026-01-05T21:51:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.833171 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:56Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.848404 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:56Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.862613 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:56Z is after 2025-08-24T17:21:41Z" Jan 05 
21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.877904 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:56Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.905597 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:56Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.920573 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:56Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.931507 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.931573 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.931585 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.931609 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.931623 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:56Z","lastTransitionTime":"2026-01-05T21:51:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.936281 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:56Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:56 crc 
kubenswrapper[4910]: I0105 21:51:56.955025 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:56Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:56 crc kubenswrapper[4910]: I0105 21:51:56.991717 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:56Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.013217 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.031267 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.034349 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.034386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.034395 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.034411 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.034422 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:57Z","lastTransitionTime":"2026-01-05T21:51:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.048359 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.049950 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/1.log" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.053637 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerStarted","Data":"b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3"} Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.054414 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.072031 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a
24d3b909acac62b81ba70a55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"l\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843082 6355 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843123 6355 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0105 21:51:42.843172 6355 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.086174 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.110240 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.126995 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.136825 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.136886 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.136897 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.136918 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.136932 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:57Z","lastTransitionTime":"2026-01-05T21:51:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.145755 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.167307 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.199641 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"l\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843082 6355 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843123 6355 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0105 21:51:42.843172 6355 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.220842 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.240105 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.240174 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.240189 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.240211 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.240226 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:57Z","lastTransitionTime":"2026-01-05T21:51:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.240390 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.255500 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.270176 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f894
5c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.284201 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.297843 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 
2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.311782 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.323750 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 
21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.335607 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.342713 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.342738 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.342747 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.342764 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.342776 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:57Z","lastTransitionTime":"2026-01-05T21:51:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.347283 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.358726 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.371303 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.383397 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:57Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.445524 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.445580 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.445592 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.445615 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.445632 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:57Z","lastTransitionTime":"2026-01-05T21:51:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.587912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.587979 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.588001 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.588026 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.588041 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:57Z","lastTransitionTime":"2026-01-05T21:51:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.690958 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.691055 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.691085 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.691143 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.691169 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:57Z","lastTransitionTime":"2026-01-05T21:51:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.721330 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n"
Jan 05 21:51:57 crc kubenswrapper[4910]: E0105 21:51:57.721501 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.794613 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.794653 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.794663 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.794681 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.794694 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:57Z","lastTransitionTime":"2026-01-05T21:51:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.898365 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.898427 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.898459 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.898484 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:57 crc kubenswrapper[4910]: I0105 21:51:57.898499 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:57Z","lastTransitionTime":"2026-01-05T21:51:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.001376 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.001434 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.001453 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.001480 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.001498 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:58Z","lastTransitionTime":"2026-01-05T21:51:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.061700 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/2.log"
Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.062710 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/1.log"
Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.066820 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerID="b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3" exitCode=1
Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.066940 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3"}
Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.067016 4910 scope.go:117] "RemoveContainer" containerID="8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55"
Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.068230 4910 scope.go:117] "RemoveContainer" containerID="b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3"
Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.068577 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"
Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.089690 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.103884 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.103954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.103973 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.104000 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.104018 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:58Z","lastTransitionTime":"2026-01-05T21:51:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.110217 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.128784 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.146114 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.161406 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.179367 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.196772 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.207533 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.207602 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.207620 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.207647 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.207670 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:58Z","lastTransitionTime":"2026-01-05T21:51:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.218296 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.243721 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"l\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843082 6355 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843123 6355 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0105 21:51:42.843172 6355 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:58Z\\\",\\\"message\\\":\\\"reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693226 6579 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0105 21:51:57.693790 6579 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:57.693846 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693899 6579 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:57.693927 6579 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:57.693997 6579 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:57.694021 6579 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:57.694035 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0105 21:51:57.694055 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0105 21:51:57.694099 6579 factory.go:656] Stopping watch factory\\\\nI0105 21:51:57.694179 6579 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0105 21:51:57.694187 6579 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:57.694208 6579 handler.go:208] Removed *v1.Pod 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.257966 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.282015 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.299527 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.309961 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.310015 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.310032 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.310060 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.310078 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:58Z","lastTransitionTime":"2026-01-05T21:51:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.320280 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.335109 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8d
d8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.350920 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.367100 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.387326 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 
21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.411308 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.413590 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.413668 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.413693 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.413726 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.413754 4910 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:58Z","lastTransitionTime":"2026-01-05T21:51:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.517016 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.517086 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.517101 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.517170 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.517193 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:58Z","lastTransitionTime":"2026-01-05T21:51:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.598622 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.598823 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.598868 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.598908 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.598944 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: 
\"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.599091 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.599187 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:52:30.599163859 +0000 UTC m=+82.176661549 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.599399 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.599452 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.599473 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.599477 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:52:30.599465037 +0000 UTC m=+82.176962727 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.599479 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.599399 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.599543 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.599565 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.599576 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:52:30.599544239 +0000 UTC m=+82.177041939 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.599604 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-05 21:52:30.59959067 +0000 UTC m=+82.177088380 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.599631 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2026-01-05 21:52:30.59961859 +0000 UTC m=+82.177116300 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.619640 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.619702 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.619723 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.619758 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.619779 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:58Z","lastTransitionTime":"2026-01-05T21:51:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.720935 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.720990 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.721178 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.721172 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.721396 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:51:58 crc kubenswrapper[4910]: E0105 21:51:58.721532 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.723411 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.723454 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.723473 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.723496 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.723516 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:58Z","lastTransitionTime":"2026-01-05T21:51:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.746880 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 
21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.770943 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.792315 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.822004 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 
2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.832743 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.832816 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.832843 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.832880 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.832907 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:58Z","lastTransitionTime":"2026-01-05T21:51:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.850107 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.867802 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.883253 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.906642 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.930109 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.936875 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.936931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.936949 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.936980 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.937000 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:58Z","lastTransitionTime":"2026-01-05T21:51:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.952439 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:58 crc kubenswrapper[4910]: I0105 21:51:58.992080 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8537cba3640eac3be42cb9ac3518001d5a49068a24d3b909acac62b81ba70a55\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"message\\\":\\\"l\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843082 6355 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0105 21:51:42.843123 6355 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:43933d5e-3c3b-4ff8-8926-04ac25de450e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0105 21:51:42.843172 6355 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:58Z\\\",\\\"message\\\":\\\"reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693226 6579 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0105 21:51:57.693790 6579 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:57.693846 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693899 6579 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:57.693927 6579 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:57.693997 6579 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:57.694021 
6579 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:57.694035 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0105 21:51:57.694055 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0105 21:51:57.694099 6579 factory.go:656] Stopping watch factory\\\\nI0105 21:51:57.694179 6579 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0105 21:51:57.694187 6579 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:57.694208 6579 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\"
,\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:58Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.012913 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.040218 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.040295 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.040306 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.040325 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.040337 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:59Z","lastTransitionTime":"2026-01-05T21:51:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.050849 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.074921 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.076088 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/2.log" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.083499 4910 scope.go:117] "RemoveContainer" containerID="b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3" Jan 05 21:51:59 crc kubenswrapper[4910]: E0105 21:51:59.084164 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.100798 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.126508 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.144421 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.144540 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.145035 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.145380 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.145459 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:59Z","lastTransitionTime":"2026-01-05T21:51:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.150514 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.171196 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.193098 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.215403 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"i
p\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.240254 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f78
14a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.248996 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.249074 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.249094 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.249161 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.249183 4910 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:59Z","lastTransitionTime":"2026-01-05T21:51:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.259611 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{}
,\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.283681 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.301337 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.316278 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.331408 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.353827 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.353905 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.353932 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.353969 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.353995 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:59Z","lastTransitionTime":"2026-01-05T21:51:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.354673 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.375920 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.400170 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.436421 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b
401cdb894c01742b36e032a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:58Z\\\",\\\"message\\\":\\\"reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693226 6579 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0105 21:51:57.693790 6579 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:57.693846 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693899 6579 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:57.693927 6579 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:57.693997 6579 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:57.694021 6579 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:57.694035 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0105 21:51:57.694055 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0105 21:51:57.694099 6579 factory.go:656] Stopping watch factory\\\\nI0105 21:51:57.694179 6579 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0105 21:51:57.694187 6579 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:57.694208 6579 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.458076 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.458177 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.458231 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.458261 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.458280 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:59Z","lastTransitionTime":"2026-01-05T21:51:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.459907 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.489429 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.512299 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.533055 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.553394 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.561258 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.561302 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.561315 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.561333 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.561344 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:59Z","lastTransitionTime":"2026-01-05T21:51:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.571521 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:51:59Z is after 2025-08-24T17:21:41Z" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.664379 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.664457 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.664475 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.664511 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.664530 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:59Z","lastTransitionTime":"2026-01-05T21:51:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.720492 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:51:59 crc kubenswrapper[4910]: E0105 21:51:59.720699 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.769032 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.769173 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.769195 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.769235 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.769260 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:59Z","lastTransitionTime":"2026-01-05T21:51:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.872574 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.872632 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.872644 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.872663 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.872674 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:59Z","lastTransitionTime":"2026-01-05T21:51:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.975322 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.975402 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.975422 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.975455 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:51:59 crc kubenswrapper[4910]: I0105 21:51:59.975476 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:51:59Z","lastTransitionTime":"2026-01-05T21:51:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.079018 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.079095 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.079143 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.079172 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.079192 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:00Z","lastTransitionTime":"2026-01-05T21:52:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.182709 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.182798 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.182862 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.182926 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.182939 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:00Z","lastTransitionTime":"2026-01-05T21:52:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.285910 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.285972 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.285992 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.286021 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.286039 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:00Z","lastTransitionTime":"2026-01-05T21:52:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.389136 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.389182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.389194 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.389213 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.389225 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:00Z","lastTransitionTime":"2026-01-05T21:52:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.492931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.493009 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.493033 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.493069 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.493094 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:00Z","lastTransitionTime":"2026-01-05T21:52:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.597048 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.597164 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.597182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.597208 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.597228 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:00Z","lastTransitionTime":"2026-01-05T21:52:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.700493 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.700560 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.700577 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.700605 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.700623 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:00Z","lastTransitionTime":"2026-01-05T21:52:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.721266 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:00 crc kubenswrapper[4910]: E0105 21:52:00.721443 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.721557 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.721684 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:00 crc kubenswrapper[4910]: E0105 21:52:00.721793 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:00 crc kubenswrapper[4910]: E0105 21:52:00.721901 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.804095 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.804180 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.804202 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.804231 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.804250 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:00Z","lastTransitionTime":"2026-01-05T21:52:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.907621 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.907690 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.907708 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.907736 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:00 crc kubenswrapper[4910]: I0105 21:52:00.907755 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:00Z","lastTransitionTime":"2026-01-05T21:52:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.011304 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.011366 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.011378 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.011398 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.011413 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.035362 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.035434 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.035454 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.035515 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.035534 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: E0105 21:52:01.052537 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:01Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.062940 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.063015 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.063061 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.063093 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.063115 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: E0105 21:52:01.087109 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:01Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.093054 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.093104 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.093161 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.093188 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.093208 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: E0105 21:52:01.117726 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:01Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.124342 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.124442 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.124472 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.124509 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.124529 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: E0105 21:52:01.142603 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:01Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.147389 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.147490 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.147513 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.147544 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.147594 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: E0105 21:52:01.168302 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:01Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:01 crc kubenswrapper[4910]: E0105 21:52:01.168578 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.171420 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.171491 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.171519 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.171554 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.171615 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.274794 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.274841 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.274855 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.274877 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.274892 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.378017 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.378084 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.378104 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.378160 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.378180 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.481278 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.481353 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.481375 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.481408 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.481433 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.584906 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.584968 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.584984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.585009 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.585027 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.688321 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.688382 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.688400 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.688427 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.688446 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.721394 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:01 crc kubenswrapper[4910]: E0105 21:52:01.721682 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.792386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.792466 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.792491 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.792528 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.792547 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.896505 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.896591 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.896613 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.896642 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.896664 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.999182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.999253 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.999278 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:01 crc kubenswrapper[4910]: I0105 21:52:01.999306 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:01.999327 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:01Z","lastTransitionTime":"2026-01-05T21:52:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.034063 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:02 crc kubenswrapper[4910]: E0105 21:52:02.034252 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 05 21:52:02 crc kubenswrapper[4910]: E0105 21:52:02.034334 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs podName:74c455b1-4706-4ca7-bd82-2b99c3c83e3f nodeName:}" failed. No retries permitted until 2026-01-05 21:52:18.034309115 +0000 UTC m=+69.611806815 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs") pod "network-metrics-daemon-mns6n" (UID: "74c455b1-4706-4ca7-bd82-2b99c3c83e3f") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.102382 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.102461 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.102479 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.102509 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.102530 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:02Z","lastTransitionTime":"2026-01-05T21:52:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.205946 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.206008 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.206026 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.206052 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.206069 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:02Z","lastTransitionTime":"2026-01-05T21:52:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.309056 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.309157 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.309176 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.309206 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.309225 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:02Z","lastTransitionTime":"2026-01-05T21:52:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.412258 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.412372 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.412399 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.412430 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.412497 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:02Z","lastTransitionTime":"2026-01-05T21:52:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.515433 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.515500 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.515518 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.515545 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.515564 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:02Z","lastTransitionTime":"2026-01-05T21:52:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.618649 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.618683 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.618700 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.618715 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.618736 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:02Z","lastTransitionTime":"2026-01-05T21:52:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.720507 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.720594 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.720642 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:02 crc kubenswrapper[4910]: E0105 21:52:02.720708 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:02 crc kubenswrapper[4910]: E0105 21:52:02.722343 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:02 crc kubenswrapper[4910]: E0105 21:52:02.722552 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.725983 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.726010 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.726020 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.726041 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.726053 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:02Z","lastTransitionTime":"2026-01-05T21:52:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.829364 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.829411 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.829423 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.829441 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.829451 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:02Z","lastTransitionTime":"2026-01-05T21:52:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.932695 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.932731 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.932741 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.932757 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:02 crc kubenswrapper[4910]: I0105 21:52:02.932768 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:02Z","lastTransitionTime":"2026-01-05T21:52:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.035016 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.035050 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.035059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.035074 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.035083 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:03Z","lastTransitionTime":"2026-01-05T21:52:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.137945 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.138090 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.138150 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.138189 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.138216 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:03Z","lastTransitionTime":"2026-01-05T21:52:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.239978 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.240020 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.240030 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.240052 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.240063 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:03Z","lastTransitionTime":"2026-01-05T21:52:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.342999 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.343045 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.343056 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.343074 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.343084 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:03Z","lastTransitionTime":"2026-01-05T21:52:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.445263 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.445310 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.445320 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.445339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.445350 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:03Z","lastTransitionTime":"2026-01-05T21:52:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.548553 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.548606 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.548619 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.548639 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.548653 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:03Z","lastTransitionTime":"2026-01-05T21:52:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.650766 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.650822 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.650839 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.650862 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.650879 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:03Z","lastTransitionTime":"2026-01-05T21:52:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.720522 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:03 crc kubenswrapper[4910]: E0105 21:52:03.720736 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.754318 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.754373 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.754393 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.754419 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.754437 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:03Z","lastTransitionTime":"2026-01-05T21:52:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.857460 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.857505 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.857518 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.857537 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.857551 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:03Z","lastTransitionTime":"2026-01-05T21:52:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.960702 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.960745 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.960756 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.960776 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:03 crc kubenswrapper[4910]: I0105 21:52:03.960787 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:03Z","lastTransitionTime":"2026-01-05T21:52:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.063634 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.063679 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.063689 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.063711 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.063725 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:04Z","lastTransitionTime":"2026-01-05T21:52:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.165530 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.165569 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.165576 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.165591 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.165600 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:04Z","lastTransitionTime":"2026-01-05T21:52:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.268277 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.268544 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.268553 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.268566 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.268576 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:04Z","lastTransitionTime":"2026-01-05T21:52:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.370383 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.370432 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.370453 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.370501 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.370527 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:04Z","lastTransitionTime":"2026-01-05T21:52:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.473069 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.473105 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.473140 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.473161 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.473173 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:04Z","lastTransitionTime":"2026-01-05T21:52:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.576331 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.576395 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.576419 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.576449 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.576471 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:04Z","lastTransitionTime":"2026-01-05T21:52:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.678721 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.678772 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.678788 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.678811 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.678828 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:04Z","lastTransitionTime":"2026-01-05T21:52:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.720467 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.720512 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:04 crc kubenswrapper[4910]: E0105 21:52:04.720631 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.720693 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:04 crc kubenswrapper[4910]: E0105 21:52:04.720843 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:04 crc kubenswrapper[4910]: E0105 21:52:04.720892 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.782014 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.782072 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.782089 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.782166 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.782186 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:04Z","lastTransitionTime":"2026-01-05T21:52:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.884644 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.884707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.884724 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.884753 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.884771 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:04Z","lastTransitionTime":"2026-01-05T21:52:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.989063 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.989179 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.989196 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.989231 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:04 crc kubenswrapper[4910]: I0105 21:52:04.989249 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:04Z","lastTransitionTime":"2026-01-05T21:52:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.099872 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.099953 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.099980 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.100018 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.100046 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:05Z","lastTransitionTime":"2026-01-05T21:52:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.203833 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.203887 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.203905 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.203930 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.203945 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:05Z","lastTransitionTime":"2026-01-05T21:52:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.306991 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.307051 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.307073 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.307105 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.307169 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:05Z","lastTransitionTime":"2026-01-05T21:52:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.409488 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.409515 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.409523 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.409537 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.409545 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:05Z","lastTransitionTime":"2026-01-05T21:52:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.512242 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.512281 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.512292 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.512309 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.512321 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:05Z","lastTransitionTime":"2026-01-05T21:52:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.614832 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.614874 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.614886 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.614904 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.614917 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:05Z","lastTransitionTime":"2026-01-05T21:52:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.718211 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.718244 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.718251 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.718267 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.718277 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:05Z","lastTransitionTime":"2026-01-05T21:52:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.720909 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:05 crc kubenswrapper[4910]: E0105 21:52:05.721012 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.822017 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.822092 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.822112 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.822174 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.822200 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:05Z","lastTransitionTime":"2026-01-05T21:52:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.925351 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.925430 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.925450 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.925483 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:05 crc kubenswrapper[4910]: I0105 21:52:05.925507 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:05Z","lastTransitionTime":"2026-01-05T21:52:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.028401 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.028515 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.028539 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.028567 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.028588 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:06Z","lastTransitionTime":"2026-01-05T21:52:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.131951 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.132022 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.132039 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.132063 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.132078 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:06Z","lastTransitionTime":"2026-01-05T21:52:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.235315 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.235370 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.235384 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.235406 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.235426 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:06Z","lastTransitionTime":"2026-01-05T21:52:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.338301 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.338383 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.338395 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.338416 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.338445 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:06Z","lastTransitionTime":"2026-01-05T21:52:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.441448 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.441526 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.441550 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.441582 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.441606 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:06Z","lastTransitionTime":"2026-01-05T21:52:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.545485 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.545545 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.545563 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.545591 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.545611 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:06Z","lastTransitionTime":"2026-01-05T21:52:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.648736 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.648808 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.648825 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.648854 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.648874 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:06Z","lastTransitionTime":"2026-01-05T21:52:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.720779 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.720906 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.720997 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:06 crc kubenswrapper[4910]: E0105 21:52:06.721252 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:06 crc kubenswrapper[4910]: E0105 21:52:06.721397 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:06 crc kubenswrapper[4910]: E0105 21:52:06.721572 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.751465 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.751521 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.751572 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.751597 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.751616 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:06Z","lastTransitionTime":"2026-01-05T21:52:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.854945 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.855039 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.855065 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.855098 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.855170 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:06Z","lastTransitionTime":"2026-01-05T21:52:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.957889 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.957960 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.957979 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.958008 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:06 crc kubenswrapper[4910]: I0105 21:52:06.958033 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:06Z","lastTransitionTime":"2026-01-05T21:52:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.061855 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.061925 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.061937 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.061959 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.061973 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:07Z","lastTransitionTime":"2026-01-05T21:52:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.165207 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.165262 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.165274 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.165297 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.165310 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:07Z","lastTransitionTime":"2026-01-05T21:52:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.268791 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.268866 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.268886 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.268916 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.268934 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:07Z","lastTransitionTime":"2026-01-05T21:52:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.372039 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.372074 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.372082 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.372098 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.372108 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:07Z","lastTransitionTime":"2026-01-05T21:52:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.475017 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.475095 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.475114 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.475181 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.475203 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:07Z","lastTransitionTime":"2026-01-05T21:52:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.578182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.578238 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.578258 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.578287 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.578307 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:07Z","lastTransitionTime":"2026-01-05T21:52:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.681371 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.681456 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.681481 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.681517 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.681542 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:07Z","lastTransitionTime":"2026-01-05T21:52:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.720660 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:07 crc kubenswrapper[4910]: E0105 21:52:07.720867 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.786598 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.786666 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.786687 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.786722 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.786745 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:07Z","lastTransitionTime":"2026-01-05T21:52:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.890805 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.890858 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.890868 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.890890 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.890901 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:07Z","lastTransitionTime":"2026-01-05T21:52:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.994064 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.994199 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.994225 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.994260 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:07 crc kubenswrapper[4910]: I0105 21:52:07.994284 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:07Z","lastTransitionTime":"2026-01-05T21:52:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.096971 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.097053 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.097071 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.097096 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.097115 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:08Z","lastTransitionTime":"2026-01-05T21:52:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.200222 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.200278 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.200295 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.200326 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.200350 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:08Z","lastTransitionTime":"2026-01-05T21:52:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.304299 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.304364 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.304387 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.304419 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.304444 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:08Z","lastTransitionTime":"2026-01-05T21:52:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.408052 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.408116 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.408177 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.408209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.408232 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:08Z","lastTransitionTime":"2026-01-05T21:52:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.511709 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.511769 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.511781 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.511801 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.511817 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:08Z","lastTransitionTime":"2026-01-05T21:52:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.614925 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.615004 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.615028 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.615059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.615079 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:08Z","lastTransitionTime":"2026-01-05T21:52:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.718756 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.718845 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.718868 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.718900 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.718934 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:08Z","lastTransitionTime":"2026-01-05T21:52:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.720624 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.720645 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.720773 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:08 crc kubenswrapper[4910]: E0105 21:52:08.721255 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:08 crc kubenswrapper[4910]: E0105 21:52:08.721521 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:08 crc kubenswrapper[4910]: E0105 21:52:08.722696 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.745684 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-0
5T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.761616 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.777382 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.791371 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 
21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.811746 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.821384 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.821433 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.821506 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.821536 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.821553 4910 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:08Z","lastTransitionTime":"2026-01-05T21:52:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.829714 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82
799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.844432 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.858609 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.869519 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.881394 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.895106 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.909729 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.925352 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.925488 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.925510 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.925533 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.925544 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:08Z","lastTransitionTime":"2026-01-05T21:52:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.925770 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.946673 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:58Z\\\",\\\"message\\\":\\\"reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693226 6579 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0105 21:51:57.693790 6579 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:57.693846 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693899 6579 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:57.693927 6579 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:57.693997 6579 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:57.694021 6579 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:57.694035 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0105 21:51:57.694055 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0105 21:51:57.694099 6579 factory.go:656] Stopping watch factory\\\\nI0105 21:51:57.694179 6579 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0105 21:51:57.694187 6579 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:57.694208 6579 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.962489 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.985171 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:08 crc kubenswrapper[4910]: I0105 21:52:08.998069 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:08Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.019358 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:09Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.031155 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.031273 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.031309 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.031357 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.031404 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:09Z","lastTransitionTime":"2026-01-05T21:52:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.135049 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.135143 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.135161 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.135196 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.135216 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:09Z","lastTransitionTime":"2026-01-05T21:52:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.238213 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.238271 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.238288 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.238313 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.238334 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:09Z","lastTransitionTime":"2026-01-05T21:52:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.341791 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.341843 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.341852 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.341874 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.341885 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:09Z","lastTransitionTime":"2026-01-05T21:52:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.445348 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.445419 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.445433 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.445457 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.445472 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:09Z","lastTransitionTime":"2026-01-05T21:52:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.548891 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.548942 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.548953 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.548972 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.548984 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:09Z","lastTransitionTime":"2026-01-05T21:52:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.652558 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.652615 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.652631 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.652656 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.652673 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:09Z","lastTransitionTime":"2026-01-05T21:52:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.720956 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:09 crc kubenswrapper[4910]: E0105 21:52:09.721151 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.755865 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.755946 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.755971 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.756008 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.756050 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:09Z","lastTransitionTime":"2026-01-05T21:52:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.859721 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.859784 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.859805 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.859832 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.859851 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:09Z","lastTransitionTime":"2026-01-05T21:52:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.962931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.962989 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.963004 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.963024 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:09 crc kubenswrapper[4910]: I0105 21:52:09.963036 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:09Z","lastTransitionTime":"2026-01-05T21:52:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.066157 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.066219 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.066233 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.066255 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.066268 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:10Z","lastTransitionTime":"2026-01-05T21:52:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.169234 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.169309 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.169327 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.169356 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.169379 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:10Z","lastTransitionTime":"2026-01-05T21:52:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.272065 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.272167 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.272186 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.272215 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.272233 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:10Z","lastTransitionTime":"2026-01-05T21:52:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.375957 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.376028 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.376058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.376096 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.376150 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:10Z","lastTransitionTime":"2026-01-05T21:52:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.481058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.481099 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.481109 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.481137 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.481147 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:10Z","lastTransitionTime":"2026-01-05T21:52:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.583792 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.583842 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.583860 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.583890 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.583906 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:10Z","lastTransitionTime":"2026-01-05T21:52:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.687405 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.687510 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.687529 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.687557 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.687576 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:10Z","lastTransitionTime":"2026-01-05T21:52:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.720828 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.720922 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:10 crc kubenswrapper[4910]: E0105 21:52:10.721064 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:10 crc kubenswrapper[4910]: E0105 21:52:10.721164 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.721630 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:10 crc kubenswrapper[4910]: E0105 21:52:10.721786 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.790970 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.791018 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.791037 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.791063 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.791080 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:10Z","lastTransitionTime":"2026-01-05T21:52:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.894185 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.894231 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.894248 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.894272 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.894290 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:10Z","lastTransitionTime":"2026-01-05T21:52:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.997538 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.997590 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.997608 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.997633 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:10 crc kubenswrapper[4910]: I0105 21:52:10.997651 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:10Z","lastTransitionTime":"2026-01-05T21:52:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.100447 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.100479 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.100489 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.100504 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.100514 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.203027 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.203060 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.203069 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.203084 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.203093 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.306402 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.306433 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.306440 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.306455 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.306465 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.362946 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.363034 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.363057 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.363090 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.363148 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: E0105 21:52:11.384562 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:11Z is after 
2025-08-24T17:21:41Z" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.389748 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.389811 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.389827 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.389853 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.389869 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: E0105 21:52:11.408420 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:11Z is after 
2025-08-24T17:21:41Z" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.412461 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.412509 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.412523 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.412545 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.412558 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: E0105 21:52:11.429385 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:11Z is after 
2025-08-24T17:21:41Z" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.433170 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.433217 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.433229 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.433247 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.433260 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: E0105 21:52:11.450042 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:11Z is after 
2025-08-24T17:21:41Z" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.455221 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.455286 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.455311 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.455339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.455361 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: E0105 21:52:11.474818 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:11Z is after 
2025-08-24T17:21:41Z" Jan 05 21:52:11 crc kubenswrapper[4910]: E0105 21:52:11.474941 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.477295 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.477349 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.477366 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.477395 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.477413 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.579997 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.580033 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.580042 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.580059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.580069 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.683070 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.683135 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.683148 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.683170 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.683184 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.720826 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:11 crc kubenswrapper[4910]: E0105 21:52:11.721043 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.786448 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.786525 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.786535 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.786556 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.786566 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.889812 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.889907 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.889921 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.889939 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.889949 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.992513 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.992562 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.992573 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.992591 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:11 crc kubenswrapper[4910]: I0105 21:52:11.992603 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:11Z","lastTransitionTime":"2026-01-05T21:52:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.095306 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.095339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.095347 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.095363 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.095372 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:12Z","lastTransitionTime":"2026-01-05T21:52:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.198373 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.198427 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.198439 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.198462 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.198477 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:12Z","lastTransitionTime":"2026-01-05T21:52:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.300763 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.300797 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.300805 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.300820 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.300829 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:12Z","lastTransitionTime":"2026-01-05T21:52:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.403679 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.403736 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.403746 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.403765 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.403776 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:12Z","lastTransitionTime":"2026-01-05T21:52:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.506647 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.506698 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.506709 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.506727 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.506737 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:12Z","lastTransitionTime":"2026-01-05T21:52:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.610214 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.610280 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.610298 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.610322 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.610338 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:12Z","lastTransitionTime":"2026-01-05T21:52:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.713841 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.713909 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.713926 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.713956 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.713974 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:12Z","lastTransitionTime":"2026-01-05T21:52:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.721318 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.721335 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.721565 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 05 21:52:12 crc kubenswrapper[4910]: E0105 21:52:12.721654 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 05 21:52:12 crc kubenswrapper[4910]: E0105 21:52:12.721754 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 05 21:52:12 crc kubenswrapper[4910]: E0105 21:52:12.722318 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 05 21:52:12 crc kubenswrapper[4910]: I0105 21:52:12.723156 4910 scope.go:117] "RemoveContainer" containerID="b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3"
Jan 05 21:52:12 crc kubenswrapper[4910]: E0105 21:52:12.723592 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"
Jan 05 21:52:13 crc kubenswrapper[4910]: I0105 21:52:13.721452 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n"
Jan 05 21:52:13 crc kubenswrapper[4910]: E0105 21:52:13.721735 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f"
Jan 05 21:52:14 crc kubenswrapper[4910]: I0105 21:52:14.720695 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 05 21:52:14 crc kubenswrapper[4910]: I0105 21:52:14.720738 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 05 21:52:14 crc kubenswrapper[4910]: I0105 21:52:14.720822 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 05 21:52:14 crc kubenswrapper[4910]: E0105 21:52:14.720858 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 05 21:52:14 crc kubenswrapper[4910]: E0105 21:52:14.720980 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 05 21:52:14 crc kubenswrapper[4910]: E0105 21:52:14.721140 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 05 21:52:15 crc kubenswrapper[4910]: I0105 21:52:15.720638 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n"
Jan 05 21:52:15 crc kubenswrapper[4910]: E0105 21:52:15.720844 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f"
Jan 05 21:52:16 crc kubenswrapper[4910]: I0105 21:52:16.720635 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 05 21:52:16 crc kubenswrapper[4910]: I0105 21:52:16.720691 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 05 21:52:16 crc kubenswrapper[4910]: I0105 21:52:16.720720 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 05 21:52:16 crc kubenswrapper[4910]: E0105 21:52:16.720869 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 05 21:52:16 crc kubenswrapper[4910]: E0105 21:52:16.720926 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 05 21:52:16 crc kubenswrapper[4910]: E0105 21:52:16.721023 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Has your network provider started?"} Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.552215 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.552286 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.552301 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.552323 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.552340 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:17Z","lastTransitionTime":"2026-01-05T21:52:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.656170 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.656247 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.656269 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.656299 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.656320 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:17Z","lastTransitionTime":"2026-01-05T21:52:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.720640 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:17 crc kubenswrapper[4910]: E0105 21:52:17.720812 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.759029 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.759179 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.759204 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.759240 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.759266 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:17Z","lastTransitionTime":"2026-01-05T21:52:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.862380 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.862454 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.862475 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.862504 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.862522 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:17Z","lastTransitionTime":"2026-01-05T21:52:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.965680 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.965742 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.965755 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.965779 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:52:17 crc kubenswrapper[4910]: I0105 21:52:17.965793 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:17Z","lastTransitionTime":"2026-01-05T21:52:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.068778 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.069384 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.069408 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.069435 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.069452 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:18Z","lastTransitionTime":"2026-01-05T21:52:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.103929 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n"
Jan 05 21:52:18 crc kubenswrapper[4910]: E0105 21:52:18.104185 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 05 21:52:18 crc kubenswrapper[4910]: E0105 21:52:18.104278 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs podName:74c455b1-4706-4ca7-bd82-2b99c3c83e3f nodeName:}" failed. No retries permitted until 2026-01-05 21:52:50.104253834 +0000 UTC m=+101.681751504 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs") pod "network-metrics-daemon-mns6n" (UID: "74c455b1-4706-4ca7-bd82-2b99c3c83e3f") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.153893 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-9zscm_07ebbe82-9e6e-47a5-91a7-4b515efc78db/kube-multus/0.log"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.153953 4910 generic.go:334] "Generic (PLEG): container finished" podID="07ebbe82-9e6e-47a5-91a7-4b515efc78db" containerID="3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8" exitCode=1
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.153994 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-9zscm" event={"ID":"07ebbe82-9e6e-47a5-91a7-4b515efc78db","Type":"ContainerDied","Data":"3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8"}
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.154472 4910 scope.go:117] "RemoveContainer" containerID="3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.172467 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.172682 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.172745 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.172833 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.172961 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:18Z","lastTransitionTime":"2026-01-05T21:52:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.173420 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.189453 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.210780 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.228078 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.241733 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.273840 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.275238 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.275298 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.275323 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.275360 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.275380 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:18Z","lastTransitionTime":"2026-01-05T21:52:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.293764 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.310156 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.360944 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.380177 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.380201 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.380209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.380225 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.380235 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:18Z","lastTransitionTime":"2026-01-05T21:52:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.381197 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b
401cdb894c01742b36e032a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:58Z\\\",\\\"message\\\":\\\"reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693226 6579 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0105 21:51:57.693790 6579 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:57.693846 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693899 6579 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:57.693927 6579 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:57.693997 6579 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:57.694021 6579 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:57.694035 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0105 21:51:57.694055 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0105 21:51:57.694099 6579 factory.go:656] Stopping watch factory\\\\nI0105 21:51:57.694179 6579 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0105 21:51:57.694187 6579 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:57.694208 6579 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.392485 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.404965 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:18Z\\\",\\\"message\\\":\\\"2026-01-05T21:51:32+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8\\\\n2026-01-05T21:51:32+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8 to /host/opt/cni/bin/\\\\n2026-01-05T21:51:33Z [verbose] multus-daemon started\\\\n2026-01-05T21:51:33Z [verbose] Readiness Indicator file check\\\\n2026-01-05T21:52:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.415949 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.435457 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256
:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.450613 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.468602 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 
2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.483097 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.483812 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.483844 
4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.483854 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.483871 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.483883 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:18Z","lastTransitionTime":"2026-01-05T21:52:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.496856 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\
"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.587816 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.587917 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.587929 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.587948 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.587957 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:18Z","lastTransitionTime":"2026-01-05T21:52:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.690855 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.690929 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.690951 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.690981 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.691004 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:18Z","lastTransitionTime":"2026-01-05T21:52:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.720824 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:18 crc kubenswrapper[4910]: E0105 21:52:18.721006 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.721269 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.721348 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:18 crc kubenswrapper[4910]: E0105 21:52:18.721555 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:18 crc kubenswrapper[4910]: E0105 21:52:18.721692 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.739834 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.756203 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.772892 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.789465 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.797063 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.797168 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.797221 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.797254 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.797275 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:18Z","lastTransitionTime":"2026-01-05T21:52:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.809948 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc 
kubenswrapper[4910]: I0105 21:52:18.825795 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.852038 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.865048 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.877884 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.892299 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.901577 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.901644 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.901667 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.901697 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.901720 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:18Z","lastTransitionTime":"2026-01-05T21:52:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.915683 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:58Z\\\",\\\"message\\\":\\\"reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693226 6579 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0105 21:51:57.693790 6579 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:57.693846 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693899 6579 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:57.693927 6579 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:57.693997 6579 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:57.694021 6579 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:57.694035 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0105 21:51:57.694055 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0105 21:51:57.694099 6579 factory.go:656] Stopping watch factory\\\\nI0105 21:51:57.694179 6579 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0105 21:51:57.694187 6579 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:57.694208 6579 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.932560 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:18Z\\\",\\\"message\\\":\\\"2026-01-05T21:51:32+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8\\\\n2026-01-05T21:51:32+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8 to /host/opt/cni/bin/\\\\n2026-01-05T21:51:33Z [verbose] multus-daemon started\\\\n2026-01-05T21:51:33Z [verbose] Readiness Indicator file check\\\\n2026-01-05T21:52:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.948713 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.968320 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256
:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.981247 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:18 crc kubenswrapper[4910]: I0105 21:52:18.993954 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:18Z is after 
2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.005103 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.005408 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.005495 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.005583 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.005650 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:19Z","lastTransitionTime":"2026-01-05T21:52:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.008716 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.022685 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"re
ady\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.107693 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.107740 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.107749 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.107765 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.107777 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:19Z","lastTransitionTime":"2026-01-05T21:52:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.160585 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-9zscm_07ebbe82-9e6e-47a5-91a7-4b515efc78db/kube-multus/0.log" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.160688 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-9zscm" event={"ID":"07ebbe82-9e6e-47a5-91a7-4b515efc78db","Type":"ContainerStarted","Data":"8f84f3608a1f16a89bb0b2bd33ddfd1fd31073c40e4528dd2de478f96cf60a75"} Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.185465 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\
\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.203223 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.210135 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.210214 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.210229 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.210252 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.210268 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:19Z","lastTransitionTime":"2026-01-05T21:52:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.219258 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.239488 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.253417 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 
21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.269518 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.285775 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.303539 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.313968 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.314027 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.314038 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.314058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.314073 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:19Z","lastTransitionTime":"2026-01-05T21:52:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.318925 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc 
kubenswrapper[4910]: I0105 21:52:19.333664 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.364443 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.385044 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.403568 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.416675 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.417038 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.417131 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.417238 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.417315 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:19Z","lastTransitionTime":"2026-01-05T21:52:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.425894 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.445931 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:58Z\\\",\\\"message\\\":\\\"reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693226 6579 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0105 21:51:57.693790 6579 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:57.693846 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693899 6579 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:57.693927 6579 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:57.693997 6579 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:57.694021 6579 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:57.694035 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0105 21:51:57.694055 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0105 21:51:57.694099 6579 factory.go:656] Stopping watch factory\\\\nI0105 21:51:57.694179 6579 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0105 21:51:57.694187 6579 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:57.694208 6579 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.457088 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.470991 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f84f3608a1f16a89bb0b2bd33ddfd1fd31073c40e4528dd2de478f96cf60a75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:18Z\\\",\\\"message\\\":\\\"2026-01-05T21:51:32+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8\\\\n2026-01-05T21:51:32+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8 to /host/opt/cni/bin/\\\\n2026-01-05T21:51:33Z [verbose] multus-daemon started\\\\n2026-01-05T21:51:33Z [verbose] Readiness Indicator file check\\\\n2026-01-05T21:52:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:52:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.483576 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:19Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.520996 4910 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.521061 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.521081 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.521109 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.521155 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:19Z","lastTransitionTime":"2026-01-05T21:52:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.624469 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.624537 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.624558 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.624589 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.624608 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:19Z","lastTransitionTime":"2026-01-05T21:52:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.720906 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:19 crc kubenswrapper[4910]: E0105 21:52:19.721150 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.728415 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.728476 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.728492 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.728519 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.728538 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:19Z","lastTransitionTime":"2026-01-05T21:52:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.831999 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.832033 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.832042 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.832056 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.832068 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:19Z","lastTransitionTime":"2026-01-05T21:52:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.934851 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.934928 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.934948 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.934976 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:19 crc kubenswrapper[4910]: I0105 21:52:19.934997 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:19Z","lastTransitionTime":"2026-01-05T21:52:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.037976 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.038050 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.038069 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.038101 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.038155 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:20Z","lastTransitionTime":"2026-01-05T21:52:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.141146 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.141184 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.141193 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.141210 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.141220 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:20Z","lastTransitionTime":"2026-01-05T21:52:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.244764 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.244838 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.244851 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.244874 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.244887 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:20Z","lastTransitionTime":"2026-01-05T21:52:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.348177 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.348237 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.348248 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.348269 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.348282 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:20Z","lastTransitionTime":"2026-01-05T21:52:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.451012 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.451071 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.451082 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.451106 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.451139 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:20Z","lastTransitionTime":"2026-01-05T21:52:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.554361 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.554435 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.554446 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.554469 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.554480 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:20Z","lastTransitionTime":"2026-01-05T21:52:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.657588 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.657662 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.657674 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.657693 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.657705 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:20Z","lastTransitionTime":"2026-01-05T21:52:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.721150 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:20 crc kubenswrapper[4910]: E0105 21:52:20.721350 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.721416 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.721416 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:20 crc kubenswrapper[4910]: E0105 21:52:20.721488 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:20 crc kubenswrapper[4910]: E0105 21:52:20.721564 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.760255 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.760339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.760366 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.760400 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.760424 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:20Z","lastTransitionTime":"2026-01-05T21:52:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.864427 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.864484 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.864495 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.864515 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.864524 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:20Z","lastTransitionTime":"2026-01-05T21:52:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.968311 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.968370 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.968383 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.968405 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:20 crc kubenswrapper[4910]: I0105 21:52:20.968419 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:20Z","lastTransitionTime":"2026-01-05T21:52:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.072386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.072459 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.072478 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.072508 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.072525 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.175174 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.175244 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.175264 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.175294 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.175312 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.279340 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.279459 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.279516 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.279546 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.279606 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.383600 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.383660 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.383677 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.383705 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.383724 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.486467 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.486504 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.486516 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.486533 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.486543 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.589430 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.589485 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.589501 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.589523 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.589539 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.659859 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.659918 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.659942 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.659972 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.659995 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: E0105 21:52:21.675663 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:21Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.684954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.684998 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.685012 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.685031 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.685045 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: E0105 21:52:21.702504 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:21Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.707613 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.707852 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.707950 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.708025 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.708095 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.720587 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:21 crc kubenswrapper[4910]: E0105 21:52:21.720860 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:21 crc kubenswrapper[4910]: E0105 21:52:21.724919 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:21Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.728774 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.728901 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.728972 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.729039 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.729095 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: E0105 21:52:21.745416 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:21Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.750236 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.750292 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.750314 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.750339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.750357 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: E0105 21:52:21.763342 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:21Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:21 crc kubenswrapper[4910]: E0105 21:52:21.763661 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.765914 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.765987 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.766011 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.766044 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.766066 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.868992 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.869050 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.869070 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.869099 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.869143 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.972167 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.972208 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.972220 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.972239 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:21 crc kubenswrapper[4910]: I0105 21:52:21.972250 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:21Z","lastTransitionTime":"2026-01-05T21:52:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.075058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.075146 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.075161 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.075182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.075197 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:22Z","lastTransitionTime":"2026-01-05T21:52:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.178651 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.178722 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.178740 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.178768 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.178788 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:22Z","lastTransitionTime":"2026-01-05T21:52:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.281866 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.281924 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.281941 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.281969 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.281987 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:22Z","lastTransitionTime":"2026-01-05T21:52:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.385620 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.385674 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.385689 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.385707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.385719 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:22Z","lastTransitionTime":"2026-01-05T21:52:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.488306 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.488392 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.488413 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.488442 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.488462 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:22Z","lastTransitionTime":"2026-01-05T21:52:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.592342 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.592401 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.592418 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.592445 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.592464 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:22Z","lastTransitionTime":"2026-01-05T21:52:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.696502 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.696553 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.696564 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.696581 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.696591 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:22Z","lastTransitionTime":"2026-01-05T21:52:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.721265 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.721320 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.721348 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:22 crc kubenswrapper[4910]: E0105 21:52:22.721395 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:22 crc kubenswrapper[4910]: E0105 21:52:22.721649 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:22 crc kubenswrapper[4910]: E0105 21:52:22.721749 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.800025 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.800092 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.800109 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.800170 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.800191 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:22Z","lastTransitionTime":"2026-01-05T21:52:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.903463 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.903541 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.903564 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.903601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:22 crc kubenswrapper[4910]: I0105 21:52:22.903625 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:22Z","lastTransitionTime":"2026-01-05T21:52:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.006726 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.006800 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.006819 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.006853 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.006874 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:23Z","lastTransitionTime":"2026-01-05T21:52:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.110632 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.110702 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.110720 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.110751 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.110771 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:23Z","lastTransitionTime":"2026-01-05T21:52:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.214328 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.214398 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.214417 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.214448 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.214469 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:23Z","lastTransitionTime":"2026-01-05T21:52:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.318823 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.318925 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.318951 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.318986 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.319010 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:23Z","lastTransitionTime":"2026-01-05T21:52:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.423337 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.423407 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.423427 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.423454 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.423476 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:23Z","lastTransitionTime":"2026-01-05T21:52:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.528065 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.528154 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.528177 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.528217 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.528239 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:23Z","lastTransitionTime":"2026-01-05T21:52:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.631252 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.631315 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.631334 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.631362 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.631380 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:23Z","lastTransitionTime":"2026-01-05T21:52:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.720634 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:23 crc kubenswrapper[4910]: E0105 21:52:23.720955 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.735564 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.735752 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.735902 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.736072 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.736276 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:23Z","lastTransitionTime":"2026-01-05T21:52:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.839992 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.840243 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.840389 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.840542 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.840697 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:23Z","lastTransitionTime":"2026-01-05T21:52:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.944103 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.944217 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.944237 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.944272 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:23 crc kubenswrapper[4910]: I0105 21:52:23.944294 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:23Z","lastTransitionTime":"2026-01-05T21:52:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.048011 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.048230 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.048473 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.048573 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.048599 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:24Z","lastTransitionTime":"2026-01-05T21:52:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.152811 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.152885 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.152904 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.152931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.152953 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:24Z","lastTransitionTime":"2026-01-05T21:52:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.256011 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.256509 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.256711 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.257005 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.257217 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:24Z","lastTransitionTime":"2026-01-05T21:52:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.360631 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.360733 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.360820 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.360856 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.360878 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:24Z","lastTransitionTime":"2026-01-05T21:52:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.464480 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.464551 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.464571 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.464598 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.464646 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:24Z","lastTransitionTime":"2026-01-05T21:52:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.568642 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.568705 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.568720 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.568744 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.568763 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:24Z","lastTransitionTime":"2026-01-05T21:52:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.671163 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.671229 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.671251 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.671281 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.671302 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:24Z","lastTransitionTime":"2026-01-05T21:52:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.721536 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.721631 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.721542 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:24 crc kubenswrapper[4910]: E0105 21:52:24.721803 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:24 crc kubenswrapper[4910]: E0105 21:52:24.721971 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:24 crc kubenswrapper[4910]: E0105 21:52:24.722179 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.774051 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.774114 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.774171 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.774200 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.774222 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:24Z","lastTransitionTime":"2026-01-05T21:52:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.878239 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.878298 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.878317 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.878347 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.878367 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:24Z","lastTransitionTime":"2026-01-05T21:52:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.982029 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.982092 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.982110 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.982172 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:24 crc kubenswrapper[4910]: I0105 21:52:24.982194 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:24Z","lastTransitionTime":"2026-01-05T21:52:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.085852 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.085931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.085957 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.085993 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.086022 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:25Z","lastTransitionTime":"2026-01-05T21:52:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.190090 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.190822 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.191044 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.191346 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.191551 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:25Z","lastTransitionTime":"2026-01-05T21:52:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.296714 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.296771 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.296788 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.296815 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.296834 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:25Z","lastTransitionTime":"2026-01-05T21:52:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.407016 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.407084 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.407108 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.407183 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.407210 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:25Z","lastTransitionTime":"2026-01-05T21:52:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.510830 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.510908 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.510927 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.510950 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.510968 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:25Z","lastTransitionTime":"2026-01-05T21:52:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.621089 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.621196 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.621215 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.621244 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.621266 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:25Z","lastTransitionTime":"2026-01-05T21:52:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.721450 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:25 crc kubenswrapper[4910]: E0105 21:52:25.721961 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.723389 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.723442 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.723462 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.723485 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.723504 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:25Z","lastTransitionTime":"2026-01-05T21:52:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.733793 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.826086 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.826225 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.826268 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.826309 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.826333 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:25Z","lastTransitionTime":"2026-01-05T21:52:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.929276 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.929331 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.929347 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.929372 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:25 crc kubenswrapper[4910]: I0105 21:52:25.929387 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:25Z","lastTransitionTime":"2026-01-05T21:52:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.032066 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.032138 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.032152 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.032212 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.032226 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:26Z","lastTransitionTime":"2026-01-05T21:52:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.134937 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.135290 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.135397 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.135520 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.135641 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:26Z","lastTransitionTime":"2026-01-05T21:52:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.239378 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.239934 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.240040 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.240167 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.240268 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:26Z","lastTransitionTime":"2026-01-05T21:52:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.344186 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.344246 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.344266 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.344323 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.344342 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:26Z","lastTransitionTime":"2026-01-05T21:52:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.447684 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.448080 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.448301 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.448462 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.448600 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:26Z","lastTransitionTime":"2026-01-05T21:52:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.552072 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.552148 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.552162 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.552185 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.552201 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:26Z","lastTransitionTime":"2026-01-05T21:52:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.656205 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.656277 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.656292 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.656315 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.656334 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:26Z","lastTransitionTime":"2026-01-05T21:52:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.720770 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.720948 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:26 crc kubenswrapper[4910]: E0105 21:52:26.721233 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.721303 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:26 crc kubenswrapper[4910]: E0105 21:52:26.721458 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:26 crc kubenswrapper[4910]: E0105 21:52:26.721629 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.722851 4910 scope.go:117] "RemoveContainer" containerID="b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.767895 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.767963 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.767984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.768019 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.768042 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:26Z","lastTransitionTime":"2026-01-05T21:52:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.871957 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.872062 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.872086 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.872114 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.872169 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:26Z","lastTransitionTime":"2026-01-05T21:52:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.978230 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.978341 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.978362 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.979770 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:26 crc kubenswrapper[4910]: I0105 21:52:26.979826 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:26Z","lastTransitionTime":"2026-01-05T21:52:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.083306 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.083385 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.083404 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.083435 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.083461 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:27Z","lastTransitionTime":"2026-01-05T21:52:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.186269 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.186307 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.186319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.186339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.186351 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:27Z","lastTransitionTime":"2026-01-05T21:52:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.193842 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/2.log" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.197246 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerStarted","Data":"b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed"} Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.197646 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.213505 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.226812 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.237777 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.248280 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"225840c3-7ddd-4637-b60d-6cd20db05d52\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88a32f17e02a9d35c306705ff1ac0f65b2d02d2a7f376412f37632608dbc2711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.260659 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.272906 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z"
Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.288901 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.288941 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.288956 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.288978 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.288994 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:27Z","lastTransitionTime":"2026-01-05T21:52:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.289673 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.324416 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:58Z\\\",\\\"message\\\":\\\"reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693226 6579 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0105 21:51:57.693790 6579 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:57.693846 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693899 6579 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:57.693927 6579 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:57.693997 6579 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:57.694021 6579 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:57.694035 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0105 21:51:57.694055 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0105 21:51:57.694099 6579 factory.go:656] Stopping watch factory\\\\nI0105 21:51:57.694179 6579 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0105 21:51:57.694187 6579 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:57.694208 6579 handler.go:208] Removed *v1.Pod 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:52:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.335000 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.364140 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.378196 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.391272 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.391317 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.391326 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.391344 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.391355 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:27Z","lastTransitionTime":"2026-01-05T21:52:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.395649 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.413418 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f84f3608a1f16a89bb0b2bd33ddfd1fd31073c40e4528dd2de478f96cf60a75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:18Z\\\",\\\"message\\\":\\\"2026-01-05T21:51:32+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8\\\\n2026-01-05T21:51:32+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8 to /host/opt/cni/bin/\\\\n2026-01-05T21:51:33Z [verbose] multus-daemon started\\\\n2026-01-05T21:51:33Z [verbose] Readiness Indicator file check\\\\n2026-01-05T21:52:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:52:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.425216 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.438010 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.448788 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 
21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.461960 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.472937 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.486108 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:27Z is after 
2025-08-24T17:21:41Z" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.494001 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.494047 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.494059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.494076 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.494085 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:27Z","lastTransitionTime":"2026-01-05T21:52:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.597102 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.597166 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.597176 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.597198 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.597209 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:27Z","lastTransitionTime":"2026-01-05T21:52:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.701229 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.701286 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.701303 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.701332 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.701350 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:27Z","lastTransitionTime":"2026-01-05T21:52:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.720728 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:27 crc kubenswrapper[4910]: E0105 21:52:27.720931 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.804445 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.804574 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.804597 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.804627 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.804645 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:27Z","lastTransitionTime":"2026-01-05T21:52:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.908356 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.908430 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.908496 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.908528 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:27 crc kubenswrapper[4910]: I0105 21:52:27.908550 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:27Z","lastTransitionTime":"2026-01-05T21:52:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.012744 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.012817 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.012835 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.012862 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.012883 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:28Z","lastTransitionTime":"2026-01-05T21:52:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.116572 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.116628 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.116651 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.116676 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.116696 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:28Z","lastTransitionTime":"2026-01-05T21:52:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.204770 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/3.log" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.206082 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/2.log" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.209531 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerID="b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed" exitCode=1 Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.209601 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed"} Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.209656 4910 scope.go:117] "RemoveContainer" containerID="b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.210864 4910 scope.go:117] "RemoveContainer" containerID="b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed" Jan 05 21:52:28 crc kubenswrapper[4910]: E0105 21:52:28.211160 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.222358 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.222446 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.222459 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.222480 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.222493 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:28Z","lastTransitionTime":"2026-01-05T21:52:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.235963 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.259462 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.287055 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.312350 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 
2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.329676 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.329730 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.329750 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.329777 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.329796 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:28Z","lastTransitionTime":"2026-01-05T21:52:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.334467 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.354078 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.373544 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.393005 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"225840c3-7ddd-4637-b60d-6cd20db05d52\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88a32f17e02a9d35c306705ff1ac0f65b2d02d2a7f376412f37632608dbc2711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.414609 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.433908 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.433983 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.434003 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.434035 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.434056 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:28Z","lastTransitionTime":"2026-01-05T21:52:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.438534 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.460893 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.495494 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650
b2ad7bae72873b23e3179fed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:58Z\\\",\\\"message\\\":\\\"reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693226 6579 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0105 21:51:57.693790 6579 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:57.693846 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693899 6579 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:57.693927 6579 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:57.693997 6579 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:57.694021 6579 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:57.694035 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0105 21:51:57.694055 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0105 21:51:57.694099 6579 factory.go:656] Stopping watch factory\\\\nI0105 21:51:57.694179 6579 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0105 21:51:57.694187 6579 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:57.694208 6579 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:28Z\\\",\\\"message\\\":\\\" 6977 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:52:27.895282 6977 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:52:27.895306 6977 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:52:27.895316 6977 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:52:27.895353 6977 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:52:27.895419 6977 factory.go:656] Stopping watch factory\\\\nI0105 21:52:27.895451 6977 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0105 21:52:27.895449 6977 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:52:27.895467 6977 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:52:27.895483 6977 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0105 21:52:27.895496 6977 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:52:27.895509 6977 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:52:27.895522 6977 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 21:52:27.895536 6977 handler.go:208] 
Removed *v1.Node event handler 7\\\\nI0105 21:52:27.895657 6977 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:52:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469
148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.515065 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.537839 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.537895 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.537916 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.537948 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.537966 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:28Z","lastTransitionTime":"2026-01-05T21:52:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.550872 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.574540 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.599784 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.626368 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.642449 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.642545 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.642571 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.642612 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.642637 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:28Z","lastTransitionTime":"2026-01-05T21:52:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.650170 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f84f3608a1f16a89bb0b2bd33ddfd1fd31073c40e4528dd2de478f96cf60a75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:18Z\\\",\\\"message\\\":\\\"2026-01-05T21:51:32+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8\\\\n2026-01-05T21:51:32+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8 to /host/opt/cni/bin/\\\\n2026-01-05T21:51:33Z [verbose] multus-daemon started\\\\n2026-01-05T21:51:33Z [verbose] Readiness Indicator file check\\\\n2026-01-05T21:52:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:52:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.670419 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.721235 4910 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.721374 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:28 crc kubenswrapper[4910]: E0105 21:52:28.721620 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.721711 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:28 crc kubenswrapper[4910]: E0105 21:52:28.721820 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:28 crc kubenswrapper[4910]: E0105 21:52:28.721908 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.744948 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.745008 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.745025 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.745046 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.745061 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:28Z","lastTransitionTime":"2026-01-05T21:52:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.763549 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.777961 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.793786 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.824488 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.856397 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.856475 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.856497 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.856530 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.856553 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:28Z","lastTransitionTime":"2026-01-05T21:52:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.856973 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b682dcc92fe6772d11c24e417c24e9441f6d7a4b401cdb894c01742b36e032a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:51:58Z\\\",\\\"message\\\":\\\"reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693226 6579 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0105 21:51:57.693790 6579 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0105 21:51:57.693846 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:51:57.693899 6579 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:51:57.693927 6579 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:51:57.693997 6579 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:51:57.694021 6579 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:51:57.694035 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0105 21:51:57.694055 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0105 21:51:57.694099 6579 factory.go:656] Stopping watch factory\\\\nI0105 21:51:57.694179 6579 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0105 21:51:57.694187 6579 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:51:57.694208 6579 handler.go:208] Removed *v1.Pod 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:28Z\\\",\\\"message\\\":\\\" 6977 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:52:27.895282 6977 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:52:27.895306 6977 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:52:27.895316 6977 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:52:27.895353 6977 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:52:27.895419 6977 factory.go:656] Stopping watch factory\\\\nI0105 21:52:27.895451 6977 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0105 21:52:27.895449 6977 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:52:27.895467 6977 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:52:27.895483 6977 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0105 21:52:27.895496 6977 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:52:27.895509 6977 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:52:27.895522 6977 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 21:52:27.895536 6977 handler.go:208] Removed *v1.Node event handler 7\\\\nI0105 21:52:27.895657 6977 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:52:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPa
th\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.879792 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.902435 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f84f3608a1f16a89bb0b2bd33ddfd1fd31073c40e4528dd2de478f96cf60a75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:18Z\\\",\\\"message\\\":\\\"2026-01-05T21:51:32+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8\\\\n2026-01-05T21:51:32+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8 to /host/opt/cni/bin/\\\\n2026-01-05T21:51:33Z [verbose] multus-daemon started\\\\n2026-01-05T21:51:33Z [verbose] Readiness Indicator file check\\\\n2026-01-05T21:52:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:52:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.920881 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.942785 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710
392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.957564 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.960492 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.960541 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.960560 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.960590 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.960609 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:28Z","lastTransitionTime":"2026-01-05T21:52:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.977336 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:28 crc kubenswrapper[4910]: I0105 21:52:28.997402 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:28Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.014625 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 
21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.030744 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"225840c3-7ddd-4637-b60d-6cd20db05d52\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88a32f17e02a9d35c306705ff1ac0f65b2d02d2a7f376412f37632608dbc2711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.048027 4910 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b9092
9ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.064888 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.064941 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.064954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.064973 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.064986 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:29Z","lastTransitionTime":"2026-01-05T21:52:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.068775 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.084650 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.102870 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.118212 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.167735 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.167801 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.167820 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.167845 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.167863 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:29Z","lastTransitionTime":"2026-01-05T21:52:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.216516 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/3.log" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.222270 4910 scope.go:117] "RemoveContainer" containerID="b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed" Jan 05 21:52:29 crc kubenswrapper[4910]: E0105 21:52:29.222681 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.248737 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\"
:0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is 
complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.269059 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.271276 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.271334 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.271354 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.271384 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.271407 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:29Z","lastTransitionTime":"2026-01-05T21:52:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.290568 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.311482 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.332165 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 
21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.350988 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.369750 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"225840c3-7ddd-4637-b60d-6cd20db05d52\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88a32f17e02a9d35c306705ff1ac0f65b2d02d2a7f376412f37632608dbc2711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.374538 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.374619 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.374643 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.374673 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.374692 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:29Z","lastTransitionTime":"2026-01-05T21:52:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.390036 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-c
erts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.415943 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.439384 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.457691 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.476776 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.480078 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.480170 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.480188 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.480218 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.480238 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:29Z","lastTransitionTime":"2026-01-05T21:52:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.514657 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\
\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":
\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.536894 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.559481 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.584670 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.584731 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.584750 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.584782 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.584805 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:29Z","lastTransitionTime":"2026-01-05T21:52:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.585003 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.624453 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:28Z\\\",\\\"message\\\":\\\" 6977 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:52:27.895282 6977 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:52:27.895306 6977 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:52:27.895316 6977 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:52:27.895353 6977 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:52:27.895419 6977 factory.go:656] Stopping watch factory\\\\nI0105 21:52:27.895451 6977 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0105 21:52:27.895449 6977 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:52:27.895467 6977 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:52:27.895483 6977 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0105 21:52:27.895496 6977 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:52:27.895509 6977 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:52:27.895522 6977 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 21:52:27.895536 6977 handler.go:208] Removed *v1.Node event handler 7\\\\nI0105 21:52:27.895657 6977 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:52:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.647464 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f84f3608a1f16a89bb0b2bd33ddfd1fd31073c40e4528dd2de478f96cf60a75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:18Z\\\",\\\"message\\\":\\\"2026-01-05T21:51:32+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8\\\\n2026-01-05T21:51:32+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8 to 
/host/opt/cni/bin/\\\\n2026-01-05T21:51:33Z [verbose] multus-daemon started\\\\n2026-01-05T21:51:33Z [verbose] Readiness Indicator file check\\\\n2026-01-05T21:52:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:52:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.667474 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:29Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.689302 4910 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.689377 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.689585 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.689620 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.689640 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:29Z","lastTransitionTime":"2026-01-05T21:52:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.721423 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:29 crc kubenswrapper[4910]: E0105 21:52:29.721670 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.793690 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.793918 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.793946 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.793977 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.793994 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:29Z","lastTransitionTime":"2026-01-05T21:52:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.897900 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.898376 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.898529 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.898740 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:29 crc kubenswrapper[4910]: I0105 21:52:29.898882 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:29Z","lastTransitionTime":"2026-01-05T21:52:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.002778 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.002862 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.002881 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.002912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.002931 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:30Z","lastTransitionTime":"2026-01-05T21:52:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.106871 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.107387 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.107613 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.107754 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.107894 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:30Z","lastTransitionTime":"2026-01-05T21:52:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.211991 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.212064 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.212091 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.212165 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.212193 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:30Z","lastTransitionTime":"2026-01-05T21:52:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.314812 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.314867 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.314877 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.314894 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.314904 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:30Z","lastTransitionTime":"2026-01-05T21:52:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.417920 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.417962 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.417974 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.417994 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.418006 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:30Z","lastTransitionTime":"2026-01-05T21:52:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.521279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.521308 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.521317 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.521332 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.521342 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:30Z","lastTransitionTime":"2026-01-05T21:52:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.625178 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.625265 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.625284 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.625316 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.625335 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:30Z","lastTransitionTime":"2026-01-05T21:52:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.671784 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.672046 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:34.671991529 +0000 UTC m=+146.249489239 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.672105 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.672213 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.672258 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.672298 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.672308 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.672339 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.672359 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.672396 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.672442 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 
nodeName:}" failed. No retries permitted until 2026-01-05 21:53:34.672423819 +0000 UTC m=+146.249921519 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.672478 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.672531 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:53:34.672483691 +0000 UTC m=+146.249981631 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.672536 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.672559 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-05 21:53:34.672541442 +0000 UTC m=+146.250039142 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.672570 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.672594 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.672668 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-05 21:53:34.672648235 +0000 UTC m=+146.250145935 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.721420 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.721499 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.721499 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.721586 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.721695 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:30 crc kubenswrapper[4910]: E0105 21:52:30.721875 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.728436 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.728465 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.728476 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.728491 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.728501 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:30Z","lastTransitionTime":"2026-01-05T21:52:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.831996 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.832489 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.832685 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.832908 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.833087 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:30Z","lastTransitionTime":"2026-01-05T21:52:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.937265 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.937713 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.937864 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.938037 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:30 crc kubenswrapper[4910]: I0105 21:52:30.938223 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:30Z","lastTransitionTime":"2026-01-05T21:52:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.041298 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.041346 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.041408 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.041431 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.041443 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:31Z","lastTransitionTime":"2026-01-05T21:52:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.146007 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.146059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.146074 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.146097 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.146111 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:31Z","lastTransitionTime":"2026-01-05T21:52:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.249522 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.249571 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.249584 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.249609 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.249660 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:31Z","lastTransitionTime":"2026-01-05T21:52:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.353680 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.353734 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.353748 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.353768 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.353781 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:31Z","lastTransitionTime":"2026-01-05T21:52:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.457107 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.457268 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.457291 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.457324 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.457344 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:31Z","lastTransitionTime":"2026-01-05T21:52:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.560179 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.560254 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.560270 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.560293 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.560306 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:31Z","lastTransitionTime":"2026-01-05T21:52:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.662549 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.662608 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.662618 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.662637 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.662651 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:31Z","lastTransitionTime":"2026-01-05T21:52:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.720911 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:31 crc kubenswrapper[4910]: E0105 21:52:31.721308 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.765539 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.765600 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.765609 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.765627 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.765642 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:31Z","lastTransitionTime":"2026-01-05T21:52:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.868582 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.868725 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.868747 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.868779 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.868799 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:31Z","lastTransitionTime":"2026-01-05T21:52:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.972226 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.972305 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.972328 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.972360 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:31 crc kubenswrapper[4910]: I0105 21:52:31.972383 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:31Z","lastTransitionTime":"2026-01-05T21:52:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.012329 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.012386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.012403 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.012427 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.012444 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:32 crc kubenswrapper[4910]: E0105 21:52:32.034797 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:32Z is after 
2025-08-24T17:21:41Z" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.041236 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.041464 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.041612 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.041803 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.041946 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:32 crc kubenswrapper[4910]: E0105 21:52:32.060800 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:32Z is after
2025-08-24T17:21:41Z" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.067668 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.067721 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.067736 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.067758 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.067771 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:32 crc kubenswrapper[4910]: E0105 21:52:32.090844 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:32Z is after
2025-08-24T17:21:41Z" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.097441 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.097529 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.097549 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.097579 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.097601 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:32 crc kubenswrapper[4910]: E0105 21:52:32.119618 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:32Z is after
2025-08-24T17:21:41Z" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.126263 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.126319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.126339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.126367 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.126387 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:32 crc kubenswrapper[4910]: E0105 21:52:32.147943 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:32Z is after
2025-08-24T17:21:41Z" Jan 05 21:52:32 crc kubenswrapper[4910]: E0105 21:52:32.148235 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.150243 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.150291 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.150309 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.150334 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.150354 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.254201 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.254278 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.254302 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.254340 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.254364 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.254201 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.254278 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.254302 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.254340 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.254364 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.357650 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.357721 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.357738 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.357768 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.357785 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.462345 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.462436 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.462457 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.462486 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.462507 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.566288 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.566372 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.566394 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.566423 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.566445 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
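Each setters.go:603 record above embeds the node's Ready condition as JSON. A small, self-contained Go sketch that decodes one of the logged payloads; the struct below mirrors only the fields visible in these lines, not the full k8s.io/api/core/v1.NodeCondition type:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// nodeCondition mirrors the fields printed by the setters.go:603 records.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// One condition payload copied verbatim from the log above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`

	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s=%s reason=%s\n%s\n", c.Type, c.Status, c.Reason, c.Message)
}

That lastTransitionTime equals lastHeartbeatTime on every record is consistent with the failing status patches above: the condition is recomputed each sync loop but never lands on the API server.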
Has your network provider started?"} Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.669794 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.669861 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.669884 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.669923 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.669948 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.721192 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.721272 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.721207 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:32 crc kubenswrapper[4910]: E0105 21:52:32.721497 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:32 crc kubenswrapper[4910]: E0105 21:52:32.721624 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:32 crc kubenswrapper[4910]: E0105 21:52:32.722091 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.772743 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.772791 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.772808 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.772832 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.772852 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.875716 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.876041 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.876181 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.876276 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.876399 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.979686 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.979760 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.979782 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.979810 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:32 crc kubenswrapper[4910]: I0105 21:52:32.979867 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:32Z","lastTransitionTime":"2026-01-05T21:52:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.083096 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.083223 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.083243 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.083273 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.083294 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:33Z","lastTransitionTime":"2026-01-05T21:52:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.187167 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.187233 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.187253 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.187284 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.187310 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:33Z","lastTransitionTime":"2026-01-05T21:52:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.291328 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.291412 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.291452 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.291491 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.291515 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:33Z","lastTransitionTime":"2026-01-05T21:52:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.395544 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.395611 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.395629 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.395657 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.395678 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:33Z","lastTransitionTime":"2026-01-05T21:52:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.499282 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.499356 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.499381 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.499414 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.499434 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:33Z","lastTransitionTime":"2026-01-05T21:52:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.603105 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.603199 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.603219 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.603251 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.603277 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:33Z","lastTransitionTime":"2026-01-05T21:52:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.707009 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.707092 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.707108 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.707174 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.707194 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:33Z","lastTransitionTime":"2026-01-05T21:52:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.722184 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:33 crc kubenswrapper[4910]: E0105 21:52:33.722395 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.811211 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.811290 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.811316 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.811350 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.811371 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:33Z","lastTransitionTime":"2026-01-05T21:52:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.914653 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.914709 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.914727 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.914752 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:33 crc kubenswrapper[4910]: I0105 21:52:33.914770 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:33Z","lastTransitionTime":"2026-01-05T21:52:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.018263 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.018336 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.018355 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.018383 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.018402 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:34Z","lastTransitionTime":"2026-01-05T21:52:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.121470 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.121531 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.121550 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.121580 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.121600 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:34Z","lastTransitionTime":"2026-01-05T21:52:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.225455 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.225503 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.225521 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.225546 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.225564 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:34Z","lastTransitionTime":"2026-01-05T21:52:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.328779 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.328862 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.328880 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.328910 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.328932 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:34Z","lastTransitionTime":"2026-01-05T21:52:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.432943 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.433014 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.433035 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.433063 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.433086 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:34Z","lastTransitionTime":"2026-01-05T21:52:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.536337 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.536404 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.536427 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.536460 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.536485 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:34Z","lastTransitionTime":"2026-01-05T21:52:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.640318 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.640394 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.640421 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.640458 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.640485 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:34Z","lastTransitionTime":"2026-01-05T21:52:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.721478 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.721486 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:34 crc kubenswrapper[4910]: E0105 21:52:34.722580 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.722640 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:34 crc kubenswrapper[4910]: E0105 21:52:34.722803 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:34 crc kubenswrapper[4910]: E0105 21:52:34.722968 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.742577 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.742627 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.742640 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.742657 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.742669 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:34Z","lastTransitionTime":"2026-01-05T21:52:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.845048 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.845166 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.845186 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.845218 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.845237 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:34Z","lastTransitionTime":"2026-01-05T21:52:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.949161 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.949228 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.949249 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.949277 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:34 crc kubenswrapper[4910]: I0105 21:52:34.949298 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:34Z","lastTransitionTime":"2026-01-05T21:52:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.053586 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.054210 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.054244 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.054280 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.054307 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:35Z","lastTransitionTime":"2026-01-05T21:52:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.157947 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.158023 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.158040 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.158069 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.158089 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:35Z","lastTransitionTime":"2026-01-05T21:52:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.261634 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.261696 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.261720 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.261754 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.261775 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:35Z","lastTransitionTime":"2026-01-05T21:52:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.364825 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.364882 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.364898 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.364925 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.364942 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:35Z","lastTransitionTime":"2026-01-05T21:52:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.468768 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.468846 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.468865 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.468894 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.468915 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:35Z","lastTransitionTime":"2026-01-05T21:52:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.572737 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.572795 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.572811 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.572838 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.572857 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:35Z","lastTransitionTime":"2026-01-05T21:52:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.676857 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.676944 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.676972 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.677005 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.677027 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:35Z","lastTransitionTime":"2026-01-05T21:52:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.721598 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:35 crc kubenswrapper[4910]: E0105 21:52:35.721864 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.780827 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.780890 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.780908 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.780934 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.780957 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:35Z","lastTransitionTime":"2026-01-05T21:52:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.884577 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.884670 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.884689 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.884718 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.884737 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:35Z","lastTransitionTime":"2026-01-05T21:52:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.988465 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.988665 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.988690 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.988718 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:35 crc kubenswrapper[4910]: I0105 21:52:35.988740 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:35Z","lastTransitionTime":"2026-01-05T21:52:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.091628 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.091704 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.091729 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.091765 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.091785 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:36Z","lastTransitionTime":"2026-01-05T21:52:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.194804 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.194889 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.194908 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.194941 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.194964 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:36Z","lastTransitionTime":"2026-01-05T21:52:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.298657 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.298705 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.298715 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.298743 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.298752 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:36Z","lastTransitionTime":"2026-01-05T21:52:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.402033 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.402108 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.402183 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.402216 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.402235 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:36Z","lastTransitionTime":"2026-01-05T21:52:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.504655 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.504855 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.504938 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.505019 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.505113 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:36Z","lastTransitionTime":"2026-01-05T21:52:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.607998 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.608093 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.608117 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.608194 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.608216 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:36Z","lastTransitionTime":"2026-01-05T21:52:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.711647 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.711712 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.711733 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.711754 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.711768 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:36Z","lastTransitionTime":"2026-01-05T21:52:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.720744 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.720831 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.720904 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:36 crc kubenswrapper[4910]: E0105 21:52:36.720982 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:36 crc kubenswrapper[4910]: E0105 21:52:36.721167 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:36 crc kubenswrapper[4910]: E0105 21:52:36.721245 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.815481 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.815562 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.815584 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.815615 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.815634 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:36Z","lastTransitionTime":"2026-01-05T21:52:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.919193 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.919302 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.919320 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.919347 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:36 crc kubenswrapper[4910]: I0105 21:52:36.919367 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:36Z","lastTransitionTime":"2026-01-05T21:52:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.022525 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.022584 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.022598 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.022618 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.022676 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:37Z","lastTransitionTime":"2026-01-05T21:52:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.125936 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.126004 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.126038 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.126058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.126071 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:37Z","lastTransitionTime":"2026-01-05T21:52:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.228982 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.229030 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.229041 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.229058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.229069 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:37Z","lastTransitionTime":"2026-01-05T21:52:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.331709 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.331772 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.331789 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.331814 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.331834 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:37Z","lastTransitionTime":"2026-01-05T21:52:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.434947 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.434987 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.434996 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.435011 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.435023 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:37Z","lastTransitionTime":"2026-01-05T21:52:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.538667 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.538729 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.538745 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.538770 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.538787 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:37Z","lastTransitionTime":"2026-01-05T21:52:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.642047 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.642176 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.642211 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.642250 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.642274 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:37Z","lastTransitionTime":"2026-01-05T21:52:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.721416 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:37 crc kubenswrapper[4910]: E0105 21:52:37.721636 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.746265 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.746332 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.746352 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.746377 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.746398 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:37Z","lastTransitionTime":"2026-01-05T21:52:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.848754 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.848808 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.848825 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.848849 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.848866 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:37Z","lastTransitionTime":"2026-01-05T21:52:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.951823 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.951896 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.951918 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.951945 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:37 crc kubenswrapper[4910]: I0105 21:52:37.951965 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:37Z","lastTransitionTime":"2026-01-05T21:52:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.055049 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.055102 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.055113 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.055169 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.055182 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:38Z","lastTransitionTime":"2026-01-05T21:52:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.158352 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.158397 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.158406 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.158422 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.158433 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:38Z","lastTransitionTime":"2026-01-05T21:52:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.262663 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.262699 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.262707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.262721 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.262731 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:38Z","lastTransitionTime":"2026-01-05T21:52:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.366229 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.366279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.366292 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.366315 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.366326 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:38Z","lastTransitionTime":"2026-01-05T21:52:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.469658 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.469737 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.469755 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.469786 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.469808 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:38Z","lastTransitionTime":"2026-01-05T21:52:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.573259 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.573319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.573335 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.573366 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.573383 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:38Z","lastTransitionTime":"2026-01-05T21:52:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.676644 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.676690 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.676707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.676732 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.676762 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:38Z","lastTransitionTime":"2026-01-05T21:52:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.721460 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:38 crc kubenswrapper[4910]: E0105 21:52:38.721735 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.722612 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:38 crc kubenswrapper[4910]: E0105 21:52:38.722748 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.722985 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:38 crc kubenswrapper[4910]: E0105 21:52:38.723113 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.779829 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.780161 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.780279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.780424 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.780534 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:38Z","lastTransitionTime":"2026-01-05T21:52:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.781521 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.800570 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.824230 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 
21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.840378 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.851634 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.865817 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.881536 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.882868 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.882901 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.882913 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.882933 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.882943 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:38Z","lastTransitionTime":"2026-01-05T21:52:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.895809 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:38 crc 
kubenswrapper[4910]: I0105 21:52:38.904607 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.914703 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"225840c3-7ddd-4637-b60d-6cd20db05d52\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88a32f17e02a9d35c306705ff1ac0f65b2d02d2a7f376412f37632608dbc2711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.927280 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.937830 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.951597 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.974582 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650
b2ad7bae72873b23e3179fed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:28Z\\\",\\\"message\\\":\\\" 6977 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:52:27.895282 6977 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:52:27.895306 6977 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:52:27.895316 6977 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:52:27.895353 6977 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:52:27.895419 6977 factory.go:656] Stopping watch factory\\\\nI0105 21:52:27.895451 6977 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0105 21:52:27.895449 6977 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:52:27.895467 6977 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:52:27.895483 6977 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0105 21:52:27.895496 6977 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:52:27.895509 6977 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:52:27.895522 6977 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 21:52:27.895536 6977 handler.go:208] Removed *v1.Node event handler 7\\\\nI0105 21:52:27.895657 6977 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:52:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.986112 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.986208 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.986230 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.986709 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.986764 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:38Z","lastTransitionTime":"2026-01-05T21:52:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:38 crc kubenswrapper[4910]: I0105 21:52:38.990299 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:38Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.010286 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.032010 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.050435 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f84f3608a1f16a89bb0b2bd33ddfd1fd31073c40e4528dd2de478f96cf60a75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:18Z\\\",\\\"message\\\":\\\"2026-01-05T21:51:32+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8\\\\n2026-01-05T21:51:32+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8 to /host/opt/cni/bin/\\\\n2026-01-05T21:51:33Z [verbose] multus-daemon started\\\\n2026-01-05T21:51:33Z [verbose] Readiness Indicator file check\\\\n2026-01-05T21:52:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:52:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.067431 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:39Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.090731 4910 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.090822 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.090845 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.090901 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.090923 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:39Z","lastTransitionTime":"2026-01-05T21:52:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.193491 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.193789 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.193913 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.194023 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.194135 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:39Z","lastTransitionTime":"2026-01-05T21:52:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.297558 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.297858 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.297981 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.298231 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.298332 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:39Z","lastTransitionTime":"2026-01-05T21:52:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.402338 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.402621 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.402758 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.402887 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.403029 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:39Z","lastTransitionTime":"2026-01-05T21:52:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.506295 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.506706 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.506819 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.506943 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.507063 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:39Z","lastTransitionTime":"2026-01-05T21:52:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.610387 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.610752 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.610939 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.611225 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.611430 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:39Z","lastTransitionTime":"2026-01-05T21:52:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.715072 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.715171 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.715192 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.715224 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.715244 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:39Z","lastTransitionTime":"2026-01-05T21:52:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.721481 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:39 crc kubenswrapper[4910]: E0105 21:52:39.721861 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.818614 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.818935 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.819345 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.819537 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.820213 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:39Z","lastTransitionTime":"2026-01-05T21:52:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.923646 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.923694 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.923713 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.923748 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:39 crc kubenswrapper[4910]: I0105 21:52:39.923770 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:39Z","lastTransitionTime":"2026-01-05T21:52:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.027055 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.027176 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.027205 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.027241 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.027266 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:40Z","lastTransitionTime":"2026-01-05T21:52:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.130057 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.130115 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.130180 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.130216 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.130239 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:40Z","lastTransitionTime":"2026-01-05T21:52:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.233611 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.233686 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.233705 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.233732 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.233753 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:40Z","lastTransitionTime":"2026-01-05T21:52:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.337499 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.337567 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.337585 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.337617 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.337637 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:40Z","lastTransitionTime":"2026-01-05T21:52:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.440670 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.440787 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.440807 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.440835 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.440854 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:40Z","lastTransitionTime":"2026-01-05T21:52:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.544404 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.544490 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.544523 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.544553 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.544575 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:40Z","lastTransitionTime":"2026-01-05T21:52:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.648638 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.648705 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.648721 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.648750 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.648770 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:40Z","lastTransitionTime":"2026-01-05T21:52:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.720542 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:40 crc kubenswrapper[4910]: E0105 21:52:40.720792 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.721765 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.722031 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:40 crc kubenswrapper[4910]: E0105 21:52:40.722298 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.722493 4910 scope.go:117] "RemoveContainer" containerID="b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed" Jan 05 21:52:40 crc kubenswrapper[4910]: E0105 21:52:40.722769 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" Jan 05 21:52:40 crc kubenswrapper[4910]: E0105 21:52:40.722829 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.751954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.752035 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.752055 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.752088 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.752114 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:40Z","lastTransitionTime":"2026-01-05T21:52:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.855830 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.855905 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.855923 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.855949 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.855971 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:40Z","lastTransitionTime":"2026-01-05T21:52:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.959064 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.959111 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.959141 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.959164 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:40 crc kubenswrapper[4910]: I0105 21:52:40.959179 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:40Z","lastTransitionTime":"2026-01-05T21:52:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.063176 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.063263 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.063289 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.063319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.063341 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:41Z","lastTransitionTime":"2026-01-05T21:52:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.167385 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.167481 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.167498 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.167563 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.167586 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:41Z","lastTransitionTime":"2026-01-05T21:52:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.270743 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.270790 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.270800 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.270817 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.270826 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:41Z","lastTransitionTime":"2026-01-05T21:52:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.373724 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.373779 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.373791 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.373812 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.373825 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:41Z","lastTransitionTime":"2026-01-05T21:52:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.476482 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.476529 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.476538 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.476558 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.476574 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:41Z","lastTransitionTime":"2026-01-05T21:52:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.579499 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.579544 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.579556 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.579574 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.579582 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:41Z","lastTransitionTime":"2026-01-05T21:52:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.681942 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.682015 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.682024 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.682039 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.682059 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:41Z","lastTransitionTime":"2026-01-05T21:52:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.721075 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:41 crc kubenswrapper[4910]: E0105 21:52:41.721417 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.785856 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.785924 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.785941 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.785968 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.785987 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:41Z","lastTransitionTime":"2026-01-05T21:52:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.889852 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.889935 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.889954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.889984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.890001 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:41Z","lastTransitionTime":"2026-01-05T21:52:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.993564 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.993632 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.993650 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.993676 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:41 crc kubenswrapper[4910]: I0105 21:52:41.993697 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:41Z","lastTransitionTime":"2026-01-05T21:52:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.097084 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.097207 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.097231 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.097264 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.097286 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.201682 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.201767 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.201789 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.201828 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.201852 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.305448 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.305518 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.305538 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.305565 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.305584 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.409346 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.409497 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.409523 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.409553 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.409576 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.421695 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.421742 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.421755 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.421774 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.421787 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: E0105 21:52:42.442048 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.447743 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.447808 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.447833 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.447861 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.447883 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: E0105 21:52:42.467328 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.473525 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.473601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.473626 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.473655 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.473677 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: E0105 21:52:42.494614 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.500497 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.500578 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.500596 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.500631 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.500652 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: E0105 21:52:42.523368 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.529400 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.529445 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.529459 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.529480 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.529492 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: E0105 21:52:42.549588 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:42Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:42 crc kubenswrapper[4910]: E0105 21:52:42.550143 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.552556 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
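
The retries above all fail identically: the node-status patch is rejected not by the API server's own validation but by the node.network-node-identity.openshift.io admission webhook, whose serving endpoint at 127.0.0.1:9743 presents a certificate that expired 2025-08-24T17:21:41Z while the node clock reads 2026-01-05. What follows is a minimal diagnostic sketch in Go (hypothetical, not part of the kubelet; the file name and output format are made up) that dials the same endpoint and reproduces the x509 validity check the TLS handshake is failing:

// certcheck.go - hypothetical diagnostic sketch, not part of the kubelet.
// Dials the webhook endpoint from the log above, skips chain verification so
// the handshake completes even with a bad certificate, then prints the served
// certificate's validity window and re-checks it against the current time,
// mirroring the "certificate has expired or is not yet valid" failure.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // diagnostic only: we want the cert even though it is expired
	})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	state := conn.ConnectionState()
	if len(state.PeerCertificates) == 0 {
		fmt.Println("no peer certificate presented")
		return
	}
	cert := state.PeerCertificates[0]
	now := time.Now()
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.Format(time.RFC3339))
	if now.After(cert.NotAfter) {
		// This is the condition the kubelet's handshake reports:
		// current time 2026-01-05T21:52:42Z is after 2025-08-24T17:21:41Z.
		fmt.Println("certificate EXPIRED", now.Sub(cert.NotAfter), "ago")
	}
}
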
event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.552622 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.552640 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.552668 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.552693 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.656606 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.656681 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.656705 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.656740 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.656760 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.721459 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.721556 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:42 crc kubenswrapper[4910]: E0105 21:52:42.721672 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.721556 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:42 crc kubenswrapper[4910]: E0105 21:52:42.721811 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:42 crc kubenswrapper[4910]: E0105 21:52:42.721908 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.761935 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.762009 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.762036 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.762066 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.762091 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.865981 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.866107 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.866155 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.866181 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.866200 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.969966 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.970408 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.970553 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.970741 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:42 crc kubenswrapper[4910]: I0105 21:52:42.970884 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:42Z","lastTransitionTime":"2026-01-05T21:52:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.074012 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.074076 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.074090 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.074110 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.074168 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:43Z","lastTransitionTime":"2026-01-05T21:52:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.177527 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.177575 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.177607 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.177630 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.177640 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:43Z","lastTransitionTime":"2026-01-05T21:52:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.280331 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.280901 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.281108 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.281380 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.281604 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:43Z","lastTransitionTime":"2026-01-05T21:52:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.387459 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.388184 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.388407 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.388621 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.388833 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:43Z","lastTransitionTime":"2026-01-05T21:52:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.493065 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.493180 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.493198 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.493222 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.493246 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:43Z","lastTransitionTime":"2026-01-05T21:52:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.597049 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.597161 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.597187 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.597219 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.597239 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:43Z","lastTransitionTime":"2026-01-05T21:52:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.701036 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.701085 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.701102 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.701172 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.701194 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:43Z","lastTransitionTime":"2026-01-05T21:52:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.721380 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:43 crc kubenswrapper[4910]: E0105 21:52:43.722630 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.804815 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.804880 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.804899 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.804924 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.804945 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:43Z","lastTransitionTime":"2026-01-05T21:52:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.907296 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.907354 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.907371 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.907396 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:43 crc kubenswrapper[4910]: I0105 21:52:43.907414 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:43Z","lastTransitionTime":"2026-01-05T21:52:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.010060 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.010105 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.010136 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.010156 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.010168 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:44Z","lastTransitionTime":"2026-01-05T21:52:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.112944 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.113656 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.113736 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.113813 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.113874 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:44Z","lastTransitionTime":"2026-01-05T21:52:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.217306 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.217394 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.217423 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.217456 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.217480 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:44Z","lastTransitionTime":"2026-01-05T21:52:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.321189 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.321282 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.321310 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.321343 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.321367 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:44Z","lastTransitionTime":"2026-01-05T21:52:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.424812 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.424896 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.424915 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.424943 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.424963 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:44Z","lastTransitionTime":"2026-01-05T21:52:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.528988 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.529076 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.529096 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.529184 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.529206 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:44Z","lastTransitionTime":"2026-01-05T21:52:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.634174 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.634270 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.634295 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.634325 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.634350 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:44Z","lastTransitionTime":"2026-01-05T21:52:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.720797 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.720869 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:44 crc kubenswrapper[4910]: E0105 21:52:44.721059 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.721112 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:44 crc kubenswrapper[4910]: E0105 21:52:44.721377 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:44 crc kubenswrapper[4910]: E0105 21:52:44.721511 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.737269 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.737369 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.737386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.737412 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.737434 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:44Z","lastTransitionTime":"2026-01-05T21:52:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.839888 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.839941 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.839954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.839974 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.839989 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:44Z","lastTransitionTime":"2026-01-05T21:52:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.942881 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.942955 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.942984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.943020 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:44 crc kubenswrapper[4910]: I0105 21:52:44.943045 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:44Z","lastTransitionTime":"2026-01-05T21:52:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.046475 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.046515 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.046525 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.046544 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.046553 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:45Z","lastTransitionTime":"2026-01-05T21:52:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.149191 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.149244 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.149257 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.149279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.149293 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:45Z","lastTransitionTime":"2026-01-05T21:52:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.252618 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.252677 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.252694 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.252720 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.252741 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:45Z","lastTransitionTime":"2026-01-05T21:52:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.355753 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.356166 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.356284 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.356390 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.356463 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:45Z","lastTransitionTime":"2026-01-05T21:52:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.460509 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.460561 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.460573 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.460595 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.460610 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:45Z","lastTransitionTime":"2026-01-05T21:52:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.564423 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.564499 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.564517 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.564574 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.564596 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:45Z","lastTransitionTime":"2026-01-05T21:52:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.668216 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.668270 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.668281 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.668303 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.668314 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:45Z","lastTransitionTime":"2026-01-05T21:52:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.721380 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:45 crc kubenswrapper[4910]: E0105 21:52:45.721638 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.771495 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.771572 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.771594 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.771627 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.771653 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:45Z","lastTransitionTime":"2026-01-05T21:52:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.875372 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.875434 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.875451 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.875472 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.875486 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:45Z","lastTransitionTime":"2026-01-05T21:52:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.982491 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.982577 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.982599 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.982628 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:45 crc kubenswrapper[4910]: I0105 21:52:45.982657 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:45Z","lastTransitionTime":"2026-01-05T21:52:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.085686 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.085739 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.085750 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.085768 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.085780 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:46Z","lastTransitionTime":"2026-01-05T21:52:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.189376 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.189444 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.189463 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.189496 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.189517 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:46Z","lastTransitionTime":"2026-01-05T21:52:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.292397 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.292446 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.292457 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.292474 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.292487 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:46Z","lastTransitionTime":"2026-01-05T21:52:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.395890 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.395960 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.395984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.396018 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.396042 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:46Z","lastTransitionTime":"2026-01-05T21:52:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.499315 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.499362 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.499374 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.499392 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.499406 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:46Z","lastTransitionTime":"2026-01-05T21:52:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.602737 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.602806 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.602830 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.602863 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.602889 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:46Z","lastTransitionTime":"2026-01-05T21:52:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.706209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.706272 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.706290 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.706318 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.706338 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:46Z","lastTransitionTime":"2026-01-05T21:52:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.720882 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.720976 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:46 crc kubenswrapper[4910]: E0105 21:52:46.721147 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.721496 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:46 crc kubenswrapper[4910]: E0105 21:52:46.721656 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:46 crc kubenswrapper[4910]: E0105 21:52:46.722021 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.810187 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.810261 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.810283 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.810310 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.810331 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:46Z","lastTransitionTime":"2026-01-05T21:52:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.913723 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.914209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.914368 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.914520 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:46 crc kubenswrapper[4910]: I0105 21:52:46.914660 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:46Z","lastTransitionTime":"2026-01-05T21:52:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.018226 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.018289 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.018309 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.018336 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.018357 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:47Z","lastTransitionTime":"2026-01-05T21:52:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.122792 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.122897 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.122915 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.122944 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.122964 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:47Z","lastTransitionTime":"2026-01-05T21:52:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.227235 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.227310 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.227334 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.227371 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.227389 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:47Z","lastTransitionTime":"2026-01-05T21:52:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.330592 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.330643 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.330655 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.330675 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.330685 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:47Z","lastTransitionTime":"2026-01-05T21:52:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.434213 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.434294 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.434311 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.434338 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.434353 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:47Z","lastTransitionTime":"2026-01-05T21:52:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.537205 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.537291 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.537310 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.537345 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.537369 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:47Z","lastTransitionTime":"2026-01-05T21:52:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.640651 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.640721 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.640740 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.640767 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.640788 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:47Z","lastTransitionTime":"2026-01-05T21:52:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.721039 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:47 crc kubenswrapper[4910]: E0105 21:52:47.721387 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.744277 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.744331 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.744348 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.744374 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.744388 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:47Z","lastTransitionTime":"2026-01-05T21:52:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.848072 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.848176 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.848195 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.848225 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.848243 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:47Z","lastTransitionTime":"2026-01-05T21:52:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.951681 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.951734 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.951748 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.951772 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:47 crc kubenswrapper[4910]: I0105 21:52:47.951787 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:47Z","lastTransitionTime":"2026-01-05T21:52:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.055671 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.055742 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.055765 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.055795 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.055821 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:48Z","lastTransitionTime":"2026-01-05T21:52:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.160705 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.160785 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.160804 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.160836 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.160855 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:48Z","lastTransitionTime":"2026-01-05T21:52:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.264543 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.264603 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.264619 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.264647 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.264667 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:48Z","lastTransitionTime":"2026-01-05T21:52:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.368221 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.368310 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.368334 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.368371 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.368395 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:48Z","lastTransitionTime":"2026-01-05T21:52:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.474089 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.474208 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.474254 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.474299 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.474337 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:48Z","lastTransitionTime":"2026-01-05T21:52:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.578787 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.579385 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.579532 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.579683 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.579812 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:48Z","lastTransitionTime":"2026-01-05T21:52:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.683667 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.683798 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.683829 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.683864 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.683893 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:48Z","lastTransitionTime":"2026-01-05T21:52:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.721071 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.721104 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:48 crc kubenswrapper[4910]: E0105 21:52:48.721386 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.721506 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:48 crc kubenswrapper[4910]: E0105 21:52:48.721568 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:48 crc kubenswrapper[4910]: E0105 21:52:48.721766 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.746774 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede818e357f3b380f8f3e44a3193ca81dce5720f9ce86fb0eb52dcfb92b4b969\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a21907688b7eed61d378bcba7f2b8f6b7ad5f2c65d6ff1e4848f6a770fe7f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.766670 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9e4e2a14-7b3c-4fbe-8ec5-11428c5a0d5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638c1985fe0a3b8f01687e06ff21e90cc026dadac8f6ee435d9c695c85a6ae48\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3f374683807a8c6f06bb4e6ff3dab813a47759f249583a9bd1b879619b1d1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpbhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-t58h4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-05T21:52:48Z is after 2025-08-24T17:21:41Z"
Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.788915 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.789465 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.789808 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.790029 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.790286 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:48Z","lastTransitionTime":"2026-01-05T21:52:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.792904 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"969290f7-140e-4c49-a197-cfab07022a17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-cr
c-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\" 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0105 21:51:21.157966 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\nI0105 21:51:26.644322 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0105 21:51:26.650199 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0105 21:51:26.650262 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0105 21:51:26.650322 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0105 21:51:26.650342 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0105 21:51:26.664189 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0105 21:51:26.664224 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664231 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0105 21:51:26.664238 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0105 21:51:26.664244 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0105 21:51:26.664248 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0105 
21:51:26.664253 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0105 21:51:26.664505 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0105 21:51:26.668974 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0105 21:51:26.669306 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2860077857/tls.crt::/tmp/serving-cert-2860077857/tls.key\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.817975 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42d05c09-eb4c-4ee9-a5e2-e91e3a42ceaf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c366ec1be5116c8015777a182415c623173912f309b8dcc52e2dd58be79908ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://82b9d35b7a2b2ca1de438b27b3280478cbd8aa200a186456585bc20994359e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20c3f8271da0182ae792c01d42dc43c0732466b8d049fbc27a95f86a28da1ef9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ba8aaa4607532622d43bf6d56597e26066226abc45454f5be583c22c41b108e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.843796 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76237fe4a60cedc0cb1397e1f0e0f91516a86112e9eb6a6d1dffc2ccd61bbbb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:48Z is after 
2025-08-24T17:21:41Z" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.862246 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82ef2ad8054fcc6ea646a786f8dd9da8956895b836884f07dca878e10dd8753d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.880725 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6566d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49dcd7ad-de44-4aa1-ba88-b7377edbdf0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1034e2821165d377405b573805d83ffdff2927d7bb03d3cf435a2a8937fcad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p9n8r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6566d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.895100 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.895215 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.895240 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.895268 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.895288 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:48Z","lastTransitionTime":"2026-01-05T21:52:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.898034 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-458lg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8004e61-6340-451e-899d-da531d593315\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1adc17513f6a5195625a1fdd4b42ed18bac1c80f2ff454dac30d86cb34809668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ss8ht\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-458lg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.915890 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"225840c3-7ddd-4637-b60d-6cd20db05d52\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88a32f17e02a9d35c306705ff1ac0f65b2d02d2a7f376412f37632608dbc2711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://615555f2bb3e1c61416edc24a76eddcfe181a553284add131686a55edbadb29c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.937582 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f74a2fe-99d3-4811-bae2-bfe6bd76a98f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c36bd46cf0c1a8e42b5c03b0bc8747a2266a374bf8d8923ff08097566841e872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://88b2d7b39d033e6a63ed6fec55f184898153bb9c59a23708572c742e0c4eea17\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eab14532bf53c2546892d2178c834917370a124b90929ab1e8930baa7ac0923\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.959223 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.979035 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hflcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"474e7e8c-c9f5-4f54-81c9-0976bcc6565d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3190d750f1a136cbc29252b21fa6112912e3884803e4b9ab4d3243eddeebf5c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e427147398c222225390230bc1779ec7936bae28509274f7f56dbdec1680eb65\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92963029710a006d5e396621c0ec555b3a3bda83007d0882db3b1e74618e6913\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a833012fbb5c2fd1956ad308082db95bee78bc271e4b68422dd335c69fbff0d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ad9df3d91334c6c4300dcbf5ff5263a86f4c772f76d3be348f0d079e2fc46c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d560f5103f8e20cf51bbfa39267338cdeece9a1b34f0488927eb9d888274deef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8b29244db82b2f64611de2f19b0f158107a09b3830430411f702110cfed555\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k6mj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hflcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:48Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.999818 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.999879 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.999897 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.999923 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:48 crc kubenswrapper[4910]: I0105 21:52:48.999940 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:48Z","lastTransitionTime":"2026-01-05T21:52:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.006825 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:28Z\\\",\\\"message\\\":\\\" 6977 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0105 21:52:27.895282 6977 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0105 21:52:27.895306 6977 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0105 21:52:27.895316 6977 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0105 21:52:27.895353 6977 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0105 21:52:27.895419 6977 factory.go:656] Stopping watch factory\\\\nI0105 21:52:27.895451 6977 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0105 21:52:27.895449 6977 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0105 21:52:27.895467 6977 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0105 21:52:27.895483 6977 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0105 21:52:27.895496 6977 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0105 21:52:27.895509 6977 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0105 21:52:27.895522 6977 handler.go:208] Removed *v1.Node event handler 2\\\\nI0105 21:52:27.895536 6977 handler.go:208] Removed *v1.Node event handler 7\\\\nI0105 21:52:27.895657 6977 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:52:27Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gjpvg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpk76\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:49Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.020182 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mns6n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rr4qc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mns6n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:49Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.052194 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c9d1645-a572-40e0-a755-46846ed8d9a3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee59ff2b71b45ae99b3642c7302e49b7355142e89f83905adce8ddb93006678b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e9e67c10c5e809fb379666ecb651904a66eb788b29139b15717f0d03840ea1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8845cfb33a35e82451e4d6743290a24fc0d6051805b236044138f5f2de87c52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7be6eb1d527eea16ff947d8914d95f6479627f
0cc17a92435ae361b54f4d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3fb78fd73cc11c0ccff247d466646f666585c83b4b08ecf8dac911733bccea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51c765081ac86172b71e688f797bb2c41fb379c453f6d030b852ab8fbad8aeb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12342da7294c2fd54879b18d6f17c66639c6865b1d69640add1847e604a75bbe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://aafcad504da8e5a2b61ff494d80edc3d7d0f86fa0fb035ad20d9a31f65ada1c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-05T21:51:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:49Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.073089 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:49Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.094862 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:26Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:49Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.102172 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.102357 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.102488 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.102599 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.102684 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:49Z","lastTransitionTime":"2026-01-05T21:52:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.118670 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-9zscm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"07ebbe82-9e6e-47a5-91a7-4b515efc78db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f84f3608a1f16a89bb0b2bd33ddfd1fd31073c40e4528dd2de478f96cf60a75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-05T21:52:18Z\\\",\\\"message\\\":\\\"2026-01-05T21:51:32+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8\\\\n2026-01-05T21:51:32+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_53905aff-a543-4a55-8508-3702b4400eb8 to /host/opt/cni/bin/\\\\n2026-01-05T21:51:33Z [verbose] multus-daemon started\\\\n2026-01-05T21:51:33Z [verbose] Readiness Indicator file check\\\\n2026-01-05T21:52:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:52:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kh5qp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-9zscm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:49Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.136543 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1180e67b-86e7-4aa8-b84f-55e2a18a7918\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-05T21:51:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://134d261eb506da1b38a497411f916ed14dfd2ad80230e5a4eaf9617ed134f81b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-05T21:51:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqx46\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-05T21:51:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-p4t85\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:49Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.205929 4910 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.206039 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.206066 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.206110 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.206175 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:49Z","lastTransitionTime":"2026-01-05T21:52:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.308662 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.308740 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.308766 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.308797 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.308821 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:49Z","lastTransitionTime":"2026-01-05T21:52:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.412571 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.412640 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.412660 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.412687 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.412705 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:49Z","lastTransitionTime":"2026-01-05T21:52:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.515816 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.515890 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.515908 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.515940 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.515961 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:49Z","lastTransitionTime":"2026-01-05T21:52:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.619005 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.619067 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.619085 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.619286 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.619347 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:49Z","lastTransitionTime":"2026-01-05T21:52:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.720482 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:49 crc kubenswrapper[4910]: E0105 21:52:49.720753 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.723089 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.723194 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.723220 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.723249 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.723271 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:49Z","lastTransitionTime":"2026-01-05T21:52:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.827810 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.828302 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.828467 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.828607 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.828754 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:49Z","lastTransitionTime":"2026-01-05T21:52:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.933263 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.933336 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.933354 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.933385 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:49 crc kubenswrapper[4910]: I0105 21:52:49.933403 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:49Z","lastTransitionTime":"2026-01-05T21:52:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.036989 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.037045 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.037062 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.037090 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.037109 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:50Z","lastTransitionTime":"2026-01-05T21:52:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.107601 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:50 crc kubenswrapper[4910]: E0105 21:52:50.107862 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 05 21:52:50 crc kubenswrapper[4910]: E0105 21:52:50.107997 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs podName:74c455b1-4706-4ca7-bd82-2b99c3c83e3f nodeName:}" failed. No retries permitted until 2026-01-05 21:53:54.107970493 +0000 UTC m=+165.685468243 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs") pod "network-metrics-daemon-mns6n" (UID: "74c455b1-4706-4ca7-bd82-2b99c3c83e3f") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.140555 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.140592 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.140602 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.140619 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.140635 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:50Z","lastTransitionTime":"2026-01-05T21:52:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.243601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.243644 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.243653 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.243671 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.243682 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:50Z","lastTransitionTime":"2026-01-05T21:52:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.346040 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.346096 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.346114 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.346164 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.346183 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:50Z","lastTransitionTime":"2026-01-05T21:52:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.449411 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.449469 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.449483 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.449505 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.449518 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:50Z","lastTransitionTime":"2026-01-05T21:52:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.553370 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.553430 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.553442 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.553468 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.553503 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:50Z","lastTransitionTime":"2026-01-05T21:52:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.656071 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.656145 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.656165 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.656190 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.656208 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:50Z","lastTransitionTime":"2026-01-05T21:52:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.721646 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:50 crc kubenswrapper[4910]: E0105 21:52:50.721908 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.721666 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.722401 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:50 crc kubenswrapper[4910]: E0105 21:52:50.722647 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:50 crc kubenswrapper[4910]: E0105 21:52:50.722826 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.758660 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.758720 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.758743 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.758772 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.758793 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:50Z","lastTransitionTime":"2026-01-05T21:52:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.862408 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.862522 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.862546 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.862583 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.862607 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:50Z","lastTransitionTime":"2026-01-05T21:52:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.966019 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.966264 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.966297 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.966339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:50 crc kubenswrapper[4910]: I0105 21:52:50.966371 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:50Z","lastTransitionTime":"2026-01-05T21:52:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.069481 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.069542 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.069556 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.069580 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.069594 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:51Z","lastTransitionTime":"2026-01-05T21:52:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.172542 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.173084 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.173316 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.173499 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.173718 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:51Z","lastTransitionTime":"2026-01-05T21:52:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.276888 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.277367 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.277606 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.277928 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.278200 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:51Z","lastTransitionTime":"2026-01-05T21:52:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.381910 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.382562 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.382601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.382624 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.382643 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:51Z","lastTransitionTime":"2026-01-05T21:52:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.486436 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.486525 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.486551 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.486581 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.486600 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:51Z","lastTransitionTime":"2026-01-05T21:52:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.590644 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.590714 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.590742 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.590774 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.590798 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:51Z","lastTransitionTime":"2026-01-05T21:52:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.695008 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.695173 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.695197 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.695235 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.695262 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:51Z","lastTransitionTime":"2026-01-05T21:52:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.721189 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:51 crc kubenswrapper[4910]: E0105 21:52:51.721446 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.722737 4910 scope.go:117] "RemoveContainer" containerID="b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed" Jan 05 21:52:51 crc kubenswrapper[4910]: E0105 21:52:51.723064 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.798651 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.798717 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.798735 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.798764 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.798783 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:51Z","lastTransitionTime":"2026-01-05T21:52:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.901994 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.902068 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.902086 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.902115 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:51 crc kubenswrapper[4910]: I0105 21:52:51.902164 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:51Z","lastTransitionTime":"2026-01-05T21:52:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.005593 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.005665 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.005693 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.005723 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.005746 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.109580 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.109653 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.109673 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.109701 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.109723 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.213436 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.213497 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.213518 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.213541 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.213557 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.315545 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.315617 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.315640 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.315668 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.315692 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.419281 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.419356 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.419380 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.419411 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.419432 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.522649 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.522700 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.522715 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.522735 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.522747 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.627710 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.627794 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.627819 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.627852 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.627876 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.720837 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:52 crc kubenswrapper[4910]: E0105 21:52:52.721033 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.721091 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.721215 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:52 crc kubenswrapper[4910]: E0105 21:52:52.721286 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:52 crc kubenswrapper[4910]: E0105 21:52:52.721514 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.730744 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.730855 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.730925 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.730953 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.730978 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.775845 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.775911 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.775931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.775961 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.775985 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: E0105 21:52:52.793332 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:52Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.799284 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.799351 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.799368 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.799393 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.799411 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: E0105 21:52:52.816812 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:52Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.822011 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.822039 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.822048 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.822063 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.822074 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: E0105 21:52:52.835616 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:52Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.840899 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.840934 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.840943 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.840960 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.840970 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: E0105 21:52:52.854672 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:52Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.859877 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.859960 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.859981 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.860012 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.860032 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: E0105 21:52:52.881680 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-05T21:52:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a68cccef-4498-48d1-bd1d-f77912a8fbc0\\\",\\\"systemUUID\\\":\\\"13985a1a-3617-450c-bc3b-e969b1c68d1d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-05T21:52:52Z is after 2025-08-24T17:21:41Z" Jan 05 21:52:52 crc kubenswrapper[4910]: E0105 21:52:52.881794 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.884279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.884304 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.884312 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.884324 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.884333 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.987022 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.987080 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.987105 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.987150 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:52 crc kubenswrapper[4910]: I0105 21:52:52.987169 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:52Z","lastTransitionTime":"2026-01-05T21:52:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.090318 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.090354 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.090365 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.090382 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.090395 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:53Z","lastTransitionTime":"2026-01-05T21:52:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.193574 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.193607 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.193618 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.193631 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.193641 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:53Z","lastTransitionTime":"2026-01-05T21:52:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.296738 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.296787 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.296803 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.296826 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.296841 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:53Z","lastTransitionTime":"2026-01-05T21:52:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.400819 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.400904 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.400933 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.400963 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.400983 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:53Z","lastTransitionTime":"2026-01-05T21:52:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.505693 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.505798 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.505895 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.506010 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.506054 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:53Z","lastTransitionTime":"2026-01-05T21:52:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.610687 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.610771 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.610796 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.610830 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.610860 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:53Z","lastTransitionTime":"2026-01-05T21:52:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.717404 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.717576 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.717602 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.717664 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.717690 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:53Z","lastTransitionTime":"2026-01-05T21:52:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 05 21:52:53 crc kubenswrapper[4910]: I0105 21:52:53.721417 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n"
Jan 05 21:52:53 crc kubenswrapper[4910]: E0105 21:52:53.721716 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f"
Jan 05 21:52:54 crc kubenswrapper[4910]: I0105 21:52:54.721489 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 05 21:52:54 crc kubenswrapper[4910]: I0105 21:52:54.721503 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 05 21:52:54 crc kubenswrapper[4910]: E0105 21:52:54.721849 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 05 21:52:54 crc kubenswrapper[4910]: E0105 21:52:54.722019 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 05 21:52:54 crc kubenswrapper[4910]: I0105 21:52:54.722264 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 05 21:52:54 crc kubenswrapper[4910]: E0105 21:52:54.722454 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 05 21:52:55 crc kubenswrapper[4910]: I0105 21:52:55.721025 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n"
Jan 05 21:52:55 crc kubenswrapper[4910]: E0105 21:52:55.721266 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f"
Jan 05 21:52:56 crc kubenswrapper[4910]: I0105 21:52:56.720576 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 05 21:52:56 crc kubenswrapper[4910]: I0105 21:52:56.720603 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 05 21:52:56 crc kubenswrapper[4910]: E0105 21:52:56.720733 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 05 21:52:56 crc kubenswrapper[4910]: I0105 21:52:56.720759 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 05 21:52:56 crc kubenswrapper[4910]: E0105 21:52:56.720885 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 05 21:52:56 crc kubenswrapper[4910]: E0105 21:52:56.720957 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 05 21:52:57 crc kubenswrapper[4910]: I0105 21:52:57.721499 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n"
Jan 05 21:52:57 crc kubenswrapper[4910]: E0105 21:52:57.721728 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f"
Has your network provider started?"} Jan 05 21:52:57 crc kubenswrapper[4910]: I0105 21:52:57.982237 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:57 crc kubenswrapper[4910]: I0105 21:52:57.982317 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:57 crc kubenswrapper[4910]: I0105 21:52:57.982341 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:57 crc kubenswrapper[4910]: I0105 21:52:57.982374 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:57 crc kubenswrapper[4910]: I0105 21:52:57.982398 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:57Z","lastTransitionTime":"2026-01-05T21:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.085483 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.085586 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.085598 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.085623 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.085640 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:58Z","lastTransitionTime":"2026-01-05T21:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.190059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.190150 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.190188 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.190209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.190221 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:58Z","lastTransitionTime":"2026-01-05T21:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.293768 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.293851 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.293874 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.293907 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.293927 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:58Z","lastTransitionTime":"2026-01-05T21:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.397183 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.397228 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.397268 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.397288 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.397302 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:58Z","lastTransitionTime":"2026-01-05T21:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.500608 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.500703 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.500722 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.500748 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.500770 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:58Z","lastTransitionTime":"2026-01-05T21:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.604093 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.604268 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.604292 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.604363 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.604401 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:58Z","lastTransitionTime":"2026-01-05T21:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.708000 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.708080 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.708092 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.708139 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.708156 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:58Z","lastTransitionTime":"2026-01-05T21:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.720985 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.721081 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:52:58 crc kubenswrapper[4910]: E0105 21:52:58.721253 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.721317 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:52:58 crc kubenswrapper[4910]: E0105 21:52:58.721539 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:52:58 crc kubenswrapper[4910]: E0105 21:52:58.721621 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.788866 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=92.788822335 podStartE2EDuration="1m32.788822335s" podCreationTimestamp="2026-01-05 21:51:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:52:58.762390787 +0000 UTC m=+110.339888497" watchObservedRunningTime="2026-01-05 21:52:58.788822335 +0000 UTC m=+110.366320055" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.789543 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=64.789527193 podStartE2EDuration="1m4.789527193s" podCreationTimestamp="2026-01-05 21:51:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:52:58.789499062 +0000 UTC m=+110.366996792" watchObservedRunningTime="2026-01-05 21:52:58.789527193 +0000 UTC m=+110.367024903" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.817369 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.820212 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.820241 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.820261 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.820276 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:58Z","lastTransitionTime":"2026-01-05T21:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.860252 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-t58h4" podStartSLOduration=87.860228322 podStartE2EDuration="1m27.860228322s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:52:58.860011737 +0000 UTC m=+110.437509407" watchObservedRunningTime="2026-01-05 21:52:58.860228322 +0000 UTC m=+110.437725992" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.897495 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=33.89747091 podStartE2EDuration="33.89747091s" podCreationTimestamp="2026-01-05 21:52:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:52:58.878048496 +0000 UTC m=+110.455546166" watchObservedRunningTime="2026-01-05 21:52:58.89747091 +0000 UTC m=+110.474968570" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.912979 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=89.912958555 podStartE2EDuration="1m29.912958555s" podCreationTimestamp="2026-01-05 21:51:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:52:58.897750197 +0000 UTC m=+110.475247867" watchObservedRunningTime="2026-01-05 21:52:58.912958555 +0000 UTC m=+110.490456225" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.925077 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.925358 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.925455 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.925571 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.925676 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:58Z","lastTransitionTime":"2026-01-05T21:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.944638 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-6566d" podStartSLOduration=87.944613423 podStartE2EDuration="1m27.944613423s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:52:58.94449646 +0000 UTC m=+110.521994130" watchObservedRunningTime="2026-01-05 21:52:58.944613423 +0000 UTC m=+110.522111103" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.989145 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-458lg" podStartSLOduration=87.989109661 podStartE2EDuration="1m27.989109661s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:52:58.958293814 +0000 UTC m=+110.535791484" watchObservedRunningTime="2026-01-05 21:52:58.989109661 +0000 UTC m=+110.566607331" Jan 05 21:52:58 crc kubenswrapper[4910]: I0105 21:52:58.989704 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=88.989700135 podStartE2EDuration="1m28.989700135s" podCreationTimestamp="2026-01-05 21:51:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:52:58.989525121 +0000 UTC m=+110.567022791" watchObservedRunningTime="2026-01-05 21:52:58.989700135 +0000 UTC m=+110.567197805" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.028703 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.028978 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.029093 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.029207 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.029302 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:59Z","lastTransitionTime":"2026-01-05T21:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.056587 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-hflcr" podStartSLOduration=88.05656447 podStartE2EDuration="1m28.05656447s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:52:59.056196861 +0000 UTC m=+110.633694531" watchObservedRunningTime="2026-01-05 21:52:59.05656447 +0000 UTC m=+110.634062140" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.130723 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-9zscm" podStartSLOduration=88.130697685 podStartE2EDuration="1m28.130697685s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:52:59.129775712 +0000 UTC m=+110.707273392" watchObservedRunningTime="2026-01-05 21:52:59.130697685 +0000 UTC m=+110.708195355" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.131645 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.131697 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.131713 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.131731 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.131744 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:59Z","lastTransitionTime":"2026-01-05T21:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.149576 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podStartSLOduration=88.149557955 podStartE2EDuration="1m28.149557955s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:52:59.149541544 +0000 UTC m=+110.727039274" watchObservedRunningTime="2026-01-05 21:52:59.149557955 +0000 UTC m=+110.727055625" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.236507 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.236563 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.236579 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.236601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.236616 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:59Z","lastTransitionTime":"2026-01-05T21:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.339943 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.340017 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.340036 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.340072 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.340092 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:59Z","lastTransitionTime":"2026-01-05T21:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.443539 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.443600 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.443618 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.443644 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.443662 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:59Z","lastTransitionTime":"2026-01-05T21:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.546906 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.546975 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.546998 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.547030 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.547051 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:59Z","lastTransitionTime":"2026-01-05T21:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.650587 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.650630 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.650644 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.650663 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.650674 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:59Z","lastTransitionTime":"2026-01-05T21:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.721233 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:52:59 crc kubenswrapper[4910]: E0105 21:52:59.721433 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.753621 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.753707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.753758 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.753784 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.753796 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:59Z","lastTransitionTime":"2026-01-05T21:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.858658 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.858731 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.858748 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.858777 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.858797 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:59Z","lastTransitionTime":"2026-01-05T21:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.961634 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.961706 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.961724 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.961752 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:52:59 crc kubenswrapper[4910]: I0105 21:52:59.961772 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:52:59Z","lastTransitionTime":"2026-01-05T21:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.065813 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.065884 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.065905 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.065935 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.065954 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:00Z","lastTransitionTime":"2026-01-05T21:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.169984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.170060 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.170076 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.170107 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.170164 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:00Z","lastTransitionTime":"2026-01-05T21:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.274107 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.274209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.274228 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.274255 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.274276 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:00Z","lastTransitionTime":"2026-01-05T21:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.377757 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.377829 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.377852 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.377881 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.377901 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:00Z","lastTransitionTime":"2026-01-05T21:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.481374 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.482034 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.482293 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.482750 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.483089 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:00Z","lastTransitionTime":"2026-01-05T21:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.587550 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.588316 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.588679 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.588878 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.589345 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:00Z","lastTransitionTime":"2026-01-05T21:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.692692 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.693319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.693526 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.693685 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.693829 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:00Z","lastTransitionTime":"2026-01-05T21:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.720744 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:00 crc kubenswrapper[4910]: E0105 21:53:00.721376 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.721525 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.721636 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:00 crc kubenswrapper[4910]: E0105 21:53:00.721712 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:53:00 crc kubenswrapper[4910]: E0105 21:53:00.721856 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.797899 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.797962 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.797978 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.798004 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.798025 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:00Z","lastTransitionTime":"2026-01-05T21:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.900729 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.900791 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.900803 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.900825 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:00 crc kubenswrapper[4910]: I0105 21:53:00.900842 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:00Z","lastTransitionTime":"2026-01-05T21:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.003953 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.004035 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.004054 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.004086 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.004107 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:01Z","lastTransitionTime":"2026-01-05T21:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.107730 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.107800 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.107824 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.107895 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.107927 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:01Z","lastTransitionTime":"2026-01-05T21:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.212052 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.212171 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.212202 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.212233 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.212250 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:01Z","lastTransitionTime":"2026-01-05T21:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.316411 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.316502 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.316527 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.316555 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.316575 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:01Z","lastTransitionTime":"2026-01-05T21:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.420564 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.420617 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.420628 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.420648 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.420660 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:01Z","lastTransitionTime":"2026-01-05T21:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.524926 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.525105 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.525169 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.525229 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.525267 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:01Z","lastTransitionTime":"2026-01-05T21:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.629598 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.630097 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.630229 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.630391 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.630513 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:01Z","lastTransitionTime":"2026-01-05T21:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.721473 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:53:01 crc kubenswrapper[4910]: E0105 21:53:01.722165 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.733971 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.734050 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.734077 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.734113 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.734179 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:01Z","lastTransitionTime":"2026-01-05T21:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.848205 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.848305 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.848326 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.848829 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.848883 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:01Z","lastTransitionTime":"2026-01-05T21:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.952188 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.952265 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.952279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.952325 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:01 crc kubenswrapper[4910]: I0105 21:53:01.952338 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:01Z","lastTransitionTime":"2026-01-05T21:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.055559 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.055628 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.055640 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.055658 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.055670 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:02Z","lastTransitionTime":"2026-01-05T21:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.159546 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.159617 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.159638 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.159665 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.159685 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:02Z","lastTransitionTime":"2026-01-05T21:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.262642 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.262723 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.262742 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.262774 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.262791 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:02Z","lastTransitionTime":"2026-01-05T21:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.366308 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.366377 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.366398 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.366426 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.366448 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:02Z","lastTransitionTime":"2026-01-05T21:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.470104 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.470220 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.470239 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.470273 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.470295 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:02Z","lastTransitionTime":"2026-01-05T21:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.573916 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.573986 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.574006 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.574038 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.574056 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:02Z","lastTransitionTime":"2026-01-05T21:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.677616 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.677687 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.677711 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.677738 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.677757 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:02Z","lastTransitionTime":"2026-01-05T21:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.720687 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.720768 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.720906 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:02 crc kubenswrapper[4910]: E0105 21:53:02.721098 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:53:02 crc kubenswrapper[4910]: E0105 21:53:02.721331 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:53:02 crc kubenswrapper[4910]: E0105 21:53:02.721586 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.782340 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.783584 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.784206 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.784431 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.784565 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:02Z","lastTransitionTime":"2026-01-05T21:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.888302 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.888392 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.888416 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.888458 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.888481 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:02Z","lastTransitionTime":"2026-01-05T21:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.991689 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.992073 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.992154 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.992240 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:02 crc kubenswrapper[4910]: I0105 21:53:02.992338 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:02Z","lastTransitionTime":"2026-01-05T21:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.096232 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.096851 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.097111 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.097261 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.097370 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:03Z","lastTransitionTime":"2026-01-05T21:53:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.201331 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.201783 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.201884 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.201973 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.202058 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:03Z","lastTransitionTime":"2026-01-05T21:53:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.247938 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.248007 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.248028 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.248056 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.248077 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-05T21:53:03Z","lastTransitionTime":"2026-01-05T21:53:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.330391 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn"] Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.331059 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.336356 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.336426 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.338835 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.340408 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.475704 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.475825 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.475893 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.475928 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.476019 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.577856 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn" Jan 05 21:53:03 crc 
kubenswrapper[4910]: I0105 21:53:03.578018 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn"
Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.578103 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn"
Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.578221 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn"
Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.578268 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn"
Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.578281 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn"
Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.578489 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn"
Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.580203 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn"
Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.593183 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn"
Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.609702 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn"
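The block above shows the volume manager walking each of the cluster-version-operator's volumes through two visible phases: "operationExecutor.MountVolume started" (reconciler_common.go:218) followed by "MountVolume.SetUp succeeded" (operation_generator.go:637). A short standalone Go sketch that pairs the two messages per volume and prints the mount latency; it assumes the klog text format exactly as it appears in this file (prefix like "I0105 21:53:03.578018", volume names rendered as volume \"name\") and is an illustration for reading this log, not kubelet code:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strings"
	"time"
)

var (
	// Capture the time-of-day from the klog prefix, e.g. "I0105 21:53:03.578018".
	tsRe = regexp.MustCompile(`[IEW]\d{4} (\d{2}:\d{2}:\d{2}\.\d{6})`)
	// Capture the volume name as it appears in this log: volume \"service-ca\".
	volRe = regexp.MustCompile(`volume \\"([^\\"]+)\\"`)
)

func main() {
	started := map[string]time.Time{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1<<20), 1<<20) // journal lines can be very long
	for sc.Scan() {
		line := sc.Text()
		ts := tsRe.FindStringSubmatch(line)
		vol := volRe.FindStringSubmatch(line)
		if ts == nil || vol == nil {
			continue
		}
		t, err := time.Parse("15:04:05.000000", ts[1])
		if err != nil {
			continue
		}
		switch name := vol[1]; {
		case strings.Contains(line, "operationExecutor.MountVolume started"):
			started[name] = t
		case strings.Contains(line, "MountVolume.SetUp succeeded"):
			if s, ok := started[name]; ok {
				fmt.Printf("%-24s mounted in %v\n", name, t.Sub(s))
			}
		}
	}
}

Fed this log on stdin (go run mountlat.go < kubelet.log), it would report, for example, service-ca mounted in roughly 1.9ms (21:53:03.580203 minus 21:53:03.578268).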
\"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f67674fe-4fbc-40b3-9c4f-01e98f621b1f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-hj9jn\" (UID: \"f67674fe-4fbc-40b3-9c4f-01e98f621b1f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.665375 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn" Jan 05 21:53:03 crc kubenswrapper[4910]: I0105 21:53:03.720422 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:53:03 crc kubenswrapper[4910]: E0105 21:53:03.720581 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:53:04 crc kubenswrapper[4910]: I0105 21:53:04.365898 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn" event={"ID":"f67674fe-4fbc-40b3-9c4f-01e98f621b1f","Type":"ContainerStarted","Data":"4a677049155555c903855e9477a31b577987273161d14f71be37271236993400"} Jan 05 21:53:04 crc kubenswrapper[4910]: I0105 21:53:04.366028 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn" event={"ID":"f67674fe-4fbc-40b3-9c4f-01e98f621b1f","Type":"ContainerStarted","Data":"c80cfd67f86d6094b82496b6d2eeb90bbca06f1b987e0cdc697e0d076fc6a3df"} Jan 05 21:53:04 crc kubenswrapper[4910]: I0105 21:53:04.367782 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-9zscm_07ebbe82-9e6e-47a5-91a7-4b515efc78db/kube-multus/1.log" Jan 05 21:53:04 crc kubenswrapper[4910]: I0105 21:53:04.368525 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-9zscm_07ebbe82-9e6e-47a5-91a7-4b515efc78db/kube-multus/0.log" Jan 05 21:53:04 crc kubenswrapper[4910]: I0105 21:53:04.368644 4910 generic.go:334] "Generic (PLEG): container finished" podID="07ebbe82-9e6e-47a5-91a7-4b515efc78db" containerID="8f84f3608a1f16a89bb0b2bd33ddfd1fd31073c40e4528dd2de478f96cf60a75" exitCode=1 Jan 05 21:53:04 crc kubenswrapper[4910]: I0105 21:53:04.368689 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-9zscm" event={"ID":"07ebbe82-9e6e-47a5-91a7-4b515efc78db","Type":"ContainerDied","Data":"8f84f3608a1f16a89bb0b2bd33ddfd1fd31073c40e4528dd2de478f96cf60a75"} Jan 05 21:53:04 crc kubenswrapper[4910]: I0105 21:53:04.368751 4910 scope.go:117] "RemoveContainer" containerID="3dc4c4e260e0f06451df4647f8f294d5227e5ebdfc379dad775689291fe4b2f8" Jan 05 21:53:04 crc kubenswrapper[4910]: I0105 21:53:04.369309 4910 scope.go:117] "RemoveContainer" containerID="8f84f3608a1f16a89bb0b2bd33ddfd1fd31073c40e4528dd2de478f96cf60a75" Jan 05 21:53:04 crc kubenswrapper[4910]: E0105 21:53:04.369555 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-9zscm_openshift-multus(07ebbe82-9e6e-47a5-91a7-4b515efc78db)\"" pod="openshift-multus/multus-9zscm" 
podUID="07ebbe82-9e6e-47a5-91a7-4b515efc78db" Jan 05 21:53:04 crc kubenswrapper[4910]: I0105 21:53:04.394547 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hj9jn" podStartSLOduration=93.394515274 podStartE2EDuration="1m33.394515274s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:04.392757361 +0000 UTC m=+115.970255061" watchObservedRunningTime="2026-01-05 21:53:04.394515274 +0000 UTC m=+115.972012944" Jan 05 21:53:04 crc kubenswrapper[4910]: I0105 21:53:04.720643 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:04 crc kubenswrapper[4910]: I0105 21:53:04.720710 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:04 crc kubenswrapper[4910]: I0105 21:53:04.720721 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:04 crc kubenswrapper[4910]: E0105 21:53:04.721100 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:53:04 crc kubenswrapper[4910]: E0105 21:53:04.721248 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:53:04 crc kubenswrapper[4910]: E0105 21:53:04.721365 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:53:04 crc kubenswrapper[4910]: I0105 21:53:04.722948 4910 scope.go:117] "RemoveContainer" containerID="b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed" Jan 05 21:53:04 crc kubenswrapper[4910]: E0105 21:53:04.723282 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fpk76_openshift-ovn-kubernetes(f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" Jan 05 21:53:05 crc kubenswrapper[4910]: I0105 21:53:05.375980 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-9zscm_07ebbe82-9e6e-47a5-91a7-4b515efc78db/kube-multus/1.log" Jan 05 21:53:05 crc kubenswrapper[4910]: I0105 21:53:05.721341 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:53:05 crc kubenswrapper[4910]: E0105 21:53:05.721544 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:53:06 crc kubenswrapper[4910]: I0105 21:53:06.721389 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:06 crc kubenswrapper[4910]: I0105 21:53:06.721530 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:06 crc kubenswrapper[4910]: E0105 21:53:06.721634 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:53:06 crc kubenswrapper[4910]: E0105 21:53:06.721686 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:53:06 crc kubenswrapper[4910]: I0105 21:53:06.721537 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:06 crc kubenswrapper[4910]: E0105 21:53:06.721943 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:53:07 crc kubenswrapper[4910]: I0105 21:53:07.720581 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:53:07 crc kubenswrapper[4910]: E0105 21:53:07.720753 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:53:08 crc kubenswrapper[4910]: E0105 21:53:08.689630 4910 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 05 21:53:08 crc kubenswrapper[4910]: I0105 21:53:08.720580 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:08 crc kubenswrapper[4910]: I0105 21:53:08.720695 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:08 crc kubenswrapper[4910]: E0105 21:53:08.725189 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:53:08 crc kubenswrapper[4910]: I0105 21:53:08.726184 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:08 crc kubenswrapper[4910]: E0105 21:53:08.726593 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:53:08 crc kubenswrapper[4910]: E0105 21:53:08.727565 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:53:08 crc kubenswrapper[4910]: E0105 21:53:08.825295 4910 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 05 21:53:09 crc kubenswrapper[4910]: I0105 21:53:09.721325 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:53:09 crc kubenswrapper[4910]: E0105 21:53:09.721568 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:53:10 crc kubenswrapper[4910]: I0105 21:53:10.720816 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:10 crc kubenswrapper[4910]: I0105 21:53:10.720914 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:10 crc kubenswrapper[4910]: E0105 21:53:10.721075 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:53:10 crc kubenswrapper[4910]: I0105 21:53:10.721206 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:10 crc kubenswrapper[4910]: E0105 21:53:10.721450 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:53:10 crc kubenswrapper[4910]: E0105 21:53:10.721539 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:53:11 crc kubenswrapper[4910]: I0105 21:53:11.720891 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:53:11 crc kubenswrapper[4910]: E0105 21:53:11.721690 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:53:12 crc kubenswrapper[4910]: I0105 21:53:12.720787 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:12 crc kubenswrapper[4910]: I0105 21:53:12.720874 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:12 crc kubenswrapper[4910]: I0105 21:53:12.720798 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:12 crc kubenswrapper[4910]: E0105 21:53:12.721075 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:53:12 crc kubenswrapper[4910]: E0105 21:53:12.721295 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:53:12 crc kubenswrapper[4910]: E0105 21:53:12.721407 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:53:13 crc kubenswrapper[4910]: I0105 21:53:13.720600 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:53:13 crc kubenswrapper[4910]: E0105 21:53:13.720959 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:53:13 crc kubenswrapper[4910]: E0105 21:53:13.827182 4910 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 05 21:53:14 crc kubenswrapper[4910]: I0105 21:53:14.721665 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:14 crc kubenswrapper[4910]: I0105 21:53:14.721796 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:14 crc kubenswrapper[4910]: I0105 21:53:14.721686 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:14 crc kubenswrapper[4910]: E0105 21:53:14.722000 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:53:14 crc kubenswrapper[4910]: E0105 21:53:14.722951 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:53:14 crc kubenswrapper[4910]: E0105 21:53:14.723107 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:53:15 crc kubenswrapper[4910]: I0105 21:53:15.720547 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:53:15 crc kubenswrapper[4910]: E0105 21:53:15.720728 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:53:15 crc kubenswrapper[4910]: I0105 21:53:15.721778 4910 scope.go:117] "RemoveContainer" containerID="b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed" Jan 05 21:53:16 crc kubenswrapper[4910]: I0105 21:53:16.426411 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/3.log" Jan 05 21:53:16 crc kubenswrapper[4910]: I0105 21:53:16.429537 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerStarted","Data":"38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6"} Jan 05 21:53:16 crc kubenswrapper[4910]: I0105 21:53:16.430410 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:53:16 crc kubenswrapper[4910]: I0105 21:53:16.471345 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podStartSLOduration=105.47131467 podStartE2EDuration="1m45.47131467s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:16.467617978 +0000 UTC m=+128.045115658" watchObservedRunningTime="2026-01-05 21:53:16.47131467 +0000 UTC m=+128.048812370" Jan 05 21:53:16 crc kubenswrapper[4910]: I0105 21:53:16.721617 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:16 crc kubenswrapper[4910]: I0105 21:53:16.721683 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:16 crc kubenswrapper[4910]: I0105 21:53:16.721643 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:16 crc kubenswrapper[4910]: E0105 21:53:16.721861 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:53:16 crc kubenswrapper[4910]: E0105 21:53:16.722028 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:53:16 crc kubenswrapper[4910]: E0105 21:53:16.722260 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:53:16 crc kubenswrapper[4910]: I0105 21:53:16.748924 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-mns6n"] Jan 05 21:53:16 crc kubenswrapper[4910]: I0105 21:53:16.749174 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:53:16 crc kubenswrapper[4910]: E0105 21:53:16.749349 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:53:18 crc kubenswrapper[4910]: I0105 21:53:18.720976 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:18 crc kubenswrapper[4910]: I0105 21:53:18.720979 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:18 crc kubenswrapper[4910]: I0105 21:53:18.721188 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:53:18 crc kubenswrapper[4910]: I0105 21:53:18.721220 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:18 crc kubenswrapper[4910]: E0105 21:53:18.724546 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:53:18 crc kubenswrapper[4910]: E0105 21:53:18.724684 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:53:18 crc kubenswrapper[4910]: I0105 21:53:18.724823 4910 scope.go:117] "RemoveContainer" containerID="8f84f3608a1f16a89bb0b2bd33ddfd1fd31073c40e4528dd2de478f96cf60a75" Jan 05 21:53:18 crc kubenswrapper[4910]: E0105 21:53:18.724828 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:53:18 crc kubenswrapper[4910]: E0105 21:53:18.724954 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:53:18 crc kubenswrapper[4910]: E0105 21:53:18.828253 4910 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 05 21:53:19 crc kubenswrapper[4910]: I0105 21:53:19.444873 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-9zscm_07ebbe82-9e6e-47a5-91a7-4b515efc78db/kube-multus/1.log" Jan 05 21:53:19 crc kubenswrapper[4910]: I0105 21:53:19.444954 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-9zscm" event={"ID":"07ebbe82-9e6e-47a5-91a7-4b515efc78db","Type":"ContainerStarted","Data":"1e8e55b2eb471b04f5366d8afb10f17f2bd5769bbfb6591d9aa2ac2beafc6b0c"} Jan 05 21:53:20 crc kubenswrapper[4910]: I0105 21:53:20.721258 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:53:20 crc kubenswrapper[4910]: I0105 21:53:20.721376 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:20 crc kubenswrapper[4910]: I0105 21:53:20.721402 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:20 crc kubenswrapper[4910]: E0105 21:53:20.721504 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:53:20 crc kubenswrapper[4910]: E0105 21:53:20.721636 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:53:20 crc kubenswrapper[4910]: I0105 21:53:20.721729 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:20 crc kubenswrapper[4910]: E0105 21:53:20.721791 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:53:20 crc kubenswrapper[4910]: E0105 21:53:20.721979 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:53:22 crc kubenswrapper[4910]: I0105 21:53:22.720725 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:22 crc kubenswrapper[4910]: I0105 21:53:22.720866 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:53:22 crc kubenswrapper[4910]: E0105 21:53:22.720972 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 05 21:53:22 crc kubenswrapper[4910]: I0105 21:53:22.720756 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:22 crc kubenswrapper[4910]: I0105 21:53:22.720881 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:22 crc kubenswrapper[4910]: E0105 21:53:22.721238 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mns6n" podUID="74c455b1-4706-4ca7-bd82-2b99c3c83e3f" Jan 05 21:53:22 crc kubenswrapper[4910]: E0105 21:53:22.721306 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 05 21:53:22 crc kubenswrapper[4910]: E0105 21:53:22.721399 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 05 21:53:24 crc kubenswrapper[4910]: I0105 21:53:24.721403 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:24 crc kubenswrapper[4910]: I0105 21:53:24.721511 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n" Jan 05 21:53:24 crc kubenswrapper[4910]: I0105 21:53:24.721556 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:24 crc kubenswrapper[4910]: I0105 21:53:24.721838 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:24 crc kubenswrapper[4910]: I0105 21:53:24.725416 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 05 21:53:24 crc kubenswrapper[4910]: I0105 21:53:24.725480 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 05 21:53:24 crc kubenswrapper[4910]: I0105 21:53:24.725495 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 05 21:53:24 crc kubenswrapper[4910]: I0105 21:53:24.725649 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 05 21:53:24 crc kubenswrapper[4910]: I0105 21:53:24.725799 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 05 21:53:24 crc kubenswrapper[4910]: I0105 21:53:24.726941 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.082532 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.134316 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-6d5lf"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.155019 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-8z8h7"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.155447 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-sfnhj"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.155684 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.155861 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.156495 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.167877 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.168229 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.168579 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.168675 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.168857 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.169047 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.169185 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.169342 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.171098 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.171527 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.171731 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.171961 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.172163 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.172339 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.172506 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.172677 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.172827 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.179164 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.185209 4910 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.185982 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-f56kd"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.186409 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.186677 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.186731 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.187182 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.187683 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.188026 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.188099 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gqzj7"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.189230 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.189572 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.190373 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.190703 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.195664 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.198179 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.198456 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-pz96p"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.199222 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.199668 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-k2d98"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.199871 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.200203 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-k2d98" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.200366 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.200688 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.200833 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.200977 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.201054 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.201114 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.201404 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.201599 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.201780 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.201820 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.201977 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.202169 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.202741 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.203156 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.203513 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.204212 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.204328 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.204672 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.204912 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.205041 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.205216 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.205319 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.205398 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.205431 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.205549 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.205596 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.205698 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.205721 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.205842 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.205936 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.205552 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.206080 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.206198 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.206342 4910 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.206369 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.206477 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.206620 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.206670 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.206757 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.206958 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.207375 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.212861 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-g5xxj"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.230300 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-j9xtz"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.230920 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.231038 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.232816 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.250696 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.251521 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-fcms5"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.252178 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.252356 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.252699 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.252980 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.253256 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.256634 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.256845 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.256924 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.257026 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.257110 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.257228 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.257393 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.257557 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.257779 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.258030 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.258135 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.258410 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.258600 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.258769 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.259490 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.259760 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.256863 4910 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.259969 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.258798 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/aa805313-499f-47e9-8ffa-827fb2664a71-audit-dir\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.260221 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/5e51587e-3444-440f-802a-347a93a869ad-etcd-service-ca\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.260317 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d856811b-12c9-4b55-bf0d-3da687639b65-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-m9xnz\" (UID: \"d856811b-12c9-4b55-bf0d-3da687639b65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.260415 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/b9194562-89b4-49cc-b0d2-7875fd2640d8-encryption-config\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.263868 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e51587e-3444-440f-802a-347a93a869ad-config\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.263956 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xx7h\" (UniqueName: \"kubernetes.io/projected/42cd7d43-1bdf-4961-bcae-6f638a83b8e0-kube-api-access-7xx7h\") pod \"openshift-controller-manager-operator-756b6f6bc6-47djl\" (UID: \"42cd7d43-1bdf-4961-bcae-6f638a83b8e0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.264080 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-oauth-config\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.264212 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: 
\"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-audit\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.264297 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.264368 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shhkp\" (UniqueName: \"kubernetes.io/projected/d856811b-12c9-4b55-bf0d-3da687639b65-kube-api-access-shhkp\") pod \"cluster-image-registry-operator-dc59b4c8b-m9xnz\" (UID: \"d856811b-12c9-4b55-bf0d-3da687639b65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.264443 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-etcd-serving-ca\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.264518 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ed6873d6-2014-4326-bb4f-939fab37b01c-available-featuregates\") pod \"openshift-config-operator-7777fb866f-pz96p\" (UID: \"ed6873d6-2014-4326-bb4f-939fab37b01c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.264591 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-config\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.264672 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69e2d768-6b62-446e-a239-4b221ba0a979-config\") pod \"route-controller-manager-6576b87f9c-82zlp\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.264750 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-service-ca\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.264821 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/12041e15-b6da-4d62-b434-ceb0e39480a6-serving-cert\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.264891 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-audit-dir\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.264959 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b9194562-89b4-49cc-b0d2-7875fd2640d8-audit-policies\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.265031 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/b9194562-89b4-49cc-b0d2-7875fd2640d8-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.265092 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b9194562-89b4-49cc-b0d2-7875fd2640d8-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.265200 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rs6qv\" (UniqueName: \"kubernetes.io/projected/aa805313-499f-47e9-8ffa-827fb2664a71-kube-api-access-rs6qv\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.265273 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/69e2d768-6b62-446e-a239-4b221ba0a979-client-ca\") pod \"route-controller-manager-6576b87f9c-82zlp\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.265352 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9t8t\" (UniqueName: \"kubernetes.io/projected/b9194562-89b4-49cc-b0d2-7875fd2640d8-kube-api-access-z9t8t\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.265416 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed6873d6-2014-4326-bb4f-939fab37b01c-serving-cert\") pod 
\"openshift-config-operator-7777fb866f-pz96p\" (UID: \"ed6873d6-2014-4326-bb4f-939fab37b01c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.265491 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sk4sv\" (UniqueName: \"kubernetes.io/projected/ed6873d6-2014-4326-bb4f-939fab37b01c-kube-api-access-sk4sv\") pod \"openshift-config-operator-7777fb866f-pz96p\" (UID: \"ed6873d6-2014-4326-bb4f-939fab37b01c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.265562 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.265630 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-encryption-config\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.265710 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pc2s\" (UniqueName: \"kubernetes.io/projected/b859ccb0-eb52-4086-8db1-cf1543b934d9-kube-api-access-7pc2s\") pod \"downloads-7954f5f757-k2d98\" (UID: \"b859ccb0-eb52-4086-8db1-cf1543b934d9\") " pod="openshift-console/downloads-7954f5f757-k2d98" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.260104 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.259039 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.260220 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.260259 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.260295 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.260333 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.260396 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.260436 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.260470 4910 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.260512 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.260550 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.266793 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.266843 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-trusted-ca-bundle\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.266861 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b9194562-89b4-49cc-b0d2-7875fd2640d8-etcd-client\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.266890 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d856811b-12c9-4b55-bf0d-3da687639b65-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-m9xnz\" (UID: \"d856811b-12c9-4b55-bf0d-3da687639b65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.266907 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bwx5\" (UniqueName: \"kubernetes.io/projected/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-kube-api-access-4bwx5\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.266926 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-audit-policies\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.267036 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-image-import-ca\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.267158 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-etcd-client\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.267187 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-client-ca\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.266562 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-ck2fz"]
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.263849 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.267902 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp"]
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.267421 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkqmv\" (UniqueName: \"kubernetes.io/projected/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-kube-api-access-nkqmv\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.268090 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.268267 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/3526640e-85a9-41f1-b79d-c31854227b25-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-6d5lf\" (UID: \"3526640e-85a9-41f1-b79d-c31854227b25\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.268356 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.268428 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmxxh\" (UniqueName: \"kubernetes.io/projected/69e2d768-6b62-446e-a239-4b221ba0a979-kube-api-access-zmxxh\") pod \"route-controller-manager-6576b87f9c-82zlp\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.268498 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgv6j\" (UniqueName: \"kubernetes.io/projected/12041e15-b6da-4d62-b434-ceb0e39480a6-kube-api-access-lgv6j\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.268568 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f414412-c2c0-4fea-a255-2444675c6f5e-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-44vj4\" (UID: \"4f414412-c2c0-4fea-a255-2444675c6f5e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.268640 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/37a900bd-079a-4b57-a7e6-e12a71e50d2f-machine-approver-tls\") pod \"machine-approver-56656f9798-jh8zt\" (UID: \"37a900bd-079a-4b57-a7e6-e12a71e50d2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.268711 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f414412-c2c0-4fea-a255-2444675c6f5e-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-44vj4\" (UID: \"4f414412-c2c0-4fea-a255-2444675c6f5e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.268782 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.268849 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12041e15-b6da-4d62-b434-ceb0e39480a6-config\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.268928 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkffq\" (UniqueName: \"kubernetes.io/projected/37a900bd-079a-4b57-a7e6-e12a71e50d2f-kube-api-access-xkffq\") pod \"machine-approver-56656f9798-jh8zt\" (UID: \"37a900bd-079a-4b57-a7e6-e12a71e50d2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.269002 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37a900bd-079a-4b57-a7e6-e12a71e50d2f-config\") pod \"machine-approver-56656f9798-jh8zt\" (UID: \"37a900bd-079a-4b57-a7e6-e12a71e50d2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.269073 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.269182 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42cd7d43-1bdf-4961-bcae-6f638a83b8e0-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-47djl\" (UID: \"42cd7d43-1bdf-4961-bcae-6f638a83b8e0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.269259 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f414412-c2c0-4fea-a255-2444675c6f5e-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-44vj4\" (UID: \"4f414412-c2c0-4fea-a255-2444675c6f5e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.269334 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-oauth-serving-cert\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.269413 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.269488 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-config\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.269559 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9194562-89b4-49cc-b0d2-7875fd2640d8-serving-cert\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.269627 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/c897d56d-7140-4aae-b1df-288502d6c78c-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-9tbp2\" (UID: \"c897d56d-7140-4aae-b1df-288502d6c78c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.269695 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.269731 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/37a900bd-079a-4b57-a7e6-e12a71e50d2f-auth-proxy-config\") pod \"machine-approver-56656f9798-jh8zt\" (UID: \"37a900bd-079a-4b57-a7e6-e12a71e50d2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.269869 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.269938 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-trusted-ca-bundle\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.270046 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bk7j\" (UniqueName: \"kubernetes.io/projected/f869ba01-9cc5-403c-a234-7a6e4864c8fb-kube-api-access-9bk7j\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.270136 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3526640e-85a9-41f1-b79d-c31854227b25-config\") pod \"machine-api-operator-5694c8668f-6d5lf\" (UID: \"3526640e-85a9-41f1-b79d-c31854227b25\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf"
Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.270223 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc658\" (UniqueName: \"kubernetes.io/projected/c897d56d-7140-4aae-b1df-288502d6c78c-kube-api-access-vc658\") pod \"cluster-samples-operator-665b6dd947-9tbp2\" (UID: \"c897d56d-7140-4aae-b1df-288502d6c78c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2"
\"openshift-apiserver-operator-796bbdcf4f-cbfc7\" (UID: \"00fa008c-8f60-4ec4-ba95-a58e71658276\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.270410 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.270515 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5e51587e-3444-440f-802a-347a93a869ad-etcd-client\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.270617 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d856811b-12c9-4b55-bf0d-3da687639b65-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-m9xnz\" (UID: \"d856811b-12c9-4b55-bf0d-3da687639b65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.270707 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/3526640e-85a9-41f1-b79d-c31854227b25-images\") pod \"machine-api-operator-5694c8668f-6d5lf\" (UID: \"3526640e-85a9-41f1-b79d-c31854227b25\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.270786 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.270860 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e51587e-3444-440f-802a-347a93a869ad-serving-cert\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.270928 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42cd7d43-1bdf-4961-bcae-6f638a83b8e0-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-47djl\" (UID: \"42cd7d43-1bdf-4961-bcae-6f638a83b8e0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.270998 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-serving-cert\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.271072 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-node-pullsecrets\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.271170 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f869ba01-9cc5-403c-a234-7a6e4864c8fb-serving-cert\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.271251 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69e2d768-6b62-446e-a239-4b221ba0a979-serving-cert\") pod \"route-controller-manager-6576b87f9c-82zlp\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.271323 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00fa008c-8f60-4ec4-ba95-a58e71658276-config\") pod \"openshift-apiserver-operator-796bbdcf4f-cbfc7\" (UID: \"00fa008c-8f60-4ec4-ba95-a58e71658276\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.271403 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/5e51587e-3444-440f-802a-347a93a869ad-etcd-ca\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.271474 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12041e15-b6da-4d62-b434-ceb0e39480a6-service-ca-bundle\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.271548 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppwtx\" (UniqueName: \"kubernetes.io/projected/3526640e-85a9-41f1-b79d-c31854227b25-kube-api-access-ppwtx\") pod \"machine-api-operator-5694c8668f-6d5lf\" (UID: \"3526640e-85a9-41f1-b79d-c31854227b25\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.271620 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hf4vz\" (UniqueName: 
\"kubernetes.io/projected/5e51587e-3444-440f-802a-347a93a869ad-kube-api-access-hf4vz\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.271695 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b9194562-89b4-49cc-b0d2-7875fd2640d8-audit-dir\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.271768 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00fa008c-8f60-4ec4-ba95-a58e71658276-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-cbfc7\" (UID: \"00fa008c-8f60-4ec4-ba95-a58e71658276\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.271857 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12041e15-b6da-4d62-b434-ceb0e39480a6-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.271929 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.271998 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-config\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.272070 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-serving-cert\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.270638 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.279222 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.285076 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.285909 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.286141 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.286340 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.286424 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.286465 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.286900 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.287077 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.287113 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.292914 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.294324 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.297544 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.298354 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.307526 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.320874 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.323049 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.324472 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-lj4dw"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.326149 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.326179 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.326767 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.326179 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.330443 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.332337 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.332812 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.333185 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-xg5fl"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.333315 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-lj4dw" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.333495 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.333982 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.334176 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.334399 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.334532 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.337896 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.342480 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-w9k7n"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.344273 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-w9k7n" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.348832 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.352589 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.354573 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.355590 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.358962 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.360178 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-b7j29"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.362358 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.362533 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-b7j29" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.363635 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.364472 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-llpdj"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.365913 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.367392 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.368424 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.368581 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.369381 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.370262 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.370961 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.372096 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-4lmhx"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.372959 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-config\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373002 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncrkw\" (UniqueName: \"kubernetes.io/projected/e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c-kube-api-access-ncrkw\") pod \"kube-storage-version-migrator-operator-b67b599dd-k6pjp\" (UID: \"e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373036 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7958e921-b665-41a4-8989-5988e6082b50-config\") pod \"kube-apiserver-operator-766d6c64bb-5n2lb\" (UID: \"7958e921-b665-41a4-8989-5988e6082b50\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373069 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/c897d56d-7140-4aae-b1df-288502d6c78c-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-9tbp2\" (UID: \"c897d56d-7140-4aae-b1df-288502d6c78c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373138 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56ef4f14-9acd-41fe-894d-5fbbe990da8e-config\") pod \"kube-controller-manager-operator-78b949d7b-4s99g\" (UID: \"56ef4f14-9acd-41fe-894d-5fbbe990da8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373152 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373202 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bk7j\" (UniqueName: \"kubernetes.io/projected/f869ba01-9cc5-403c-a234-7a6e4864c8fb-kube-api-access-9bk7j\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373315 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc658\" (UniqueName: \"kubernetes.io/projected/c897d56d-7140-4aae-b1df-288502d6c78c-kube-api-access-vc658\") pod \"cluster-samples-operator-665b6dd947-9tbp2\" (UID: \"c897d56d-7140-4aae-b1df-288502d6c78c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373356 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/55abc4f3-cea0-4cfc-9cb8-49c2be5598c1-proxy-tls\") pod \"machine-config-controller-84d6567774-8ncwl\" (UID: \"55abc4f3-cea0-4cfc-9cb8-49c2be5598c1\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373360 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-68wbd"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373398 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373435 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5e51587e-3444-440f-802a-347a93a869ad-etcd-client\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373457 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/3526640e-85a9-41f1-b79d-c31854227b25-images\") pod \"machine-api-operator-5694c8668f-6d5lf\" (UID: \"3526640e-85a9-41f1-b79d-c31854227b25\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373479 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373508 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/d5c787df-89be-4f93-8fae-b35d9bea1dfa-trusted-ca\") pod \"ingress-operator-5b745b69d9-zpbbr\" (UID: \"d5c787df-89be-4f93-8fae-b35d9bea1dfa\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373538 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jfcs\" (UniqueName: \"kubernetes.io/projected/d5c787df-89be-4f93-8fae-b35d9bea1dfa-kube-api-access-7jfcs\") pod \"ingress-operator-5b745b69d9-zpbbr\" (UID: \"d5c787df-89be-4f93-8fae-b35d9bea1dfa\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373581 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-serving-cert\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373601 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d5c787df-89be-4f93-8fae-b35d9bea1dfa-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zpbbr\" (UID: \"d5c787df-89be-4f93-8fae-b35d9bea1dfa\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373625 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00fa008c-8f60-4ec4-ba95-a58e71658276-config\") pod \"openshift-apiserver-operator-796bbdcf4f-cbfc7\" (UID: \"00fa008c-8f60-4ec4-ba95-a58e71658276\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373645 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d5c787df-89be-4f93-8fae-b35d9bea1dfa-metrics-tls\") pod \"ingress-operator-5b745b69d9-zpbbr\" (UID: \"d5c787df-89be-4f93-8fae-b35d9bea1dfa\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373679 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-serving-cert\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373702 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/5e51587e-3444-440f-802a-347a93a869ad-etcd-service-ca\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373726 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d856811b-12c9-4b55-bf0d-3da687639b65-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-m9xnz\" (UID: \"d856811b-12c9-4b55-bf0d-3da687639b65\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373749 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/aa805313-499f-47e9-8ffa-827fb2664a71-audit-dir\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373771 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/55abc4f3-cea0-4cfc-9cb8-49c2be5598c1-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-8ncwl\" (UID: \"55abc4f3-cea0-4cfc-9cb8-49c2be5598c1\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373795 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tx24f\" (UniqueName: \"kubernetes.io/projected/55abc4f3-cea0-4cfc-9cb8-49c2be5598c1-kube-api-access-tx24f\") pod \"machine-config-controller-84d6567774-8ncwl\" (UID: \"55abc4f3-cea0-4cfc-9cb8-49c2be5598c1\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373826 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xx7h\" (UniqueName: \"kubernetes.io/projected/42cd7d43-1bdf-4961-bcae-6f638a83b8e0-kube-api-access-7xx7h\") pod \"openshift-controller-manager-operator-756b6f6bc6-47djl\" (UID: \"42cd7d43-1bdf-4961-bcae-6f638a83b8e0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373848 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373869 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69e2d768-6b62-446e-a239-4b221ba0a979-config\") pod \"route-controller-manager-6576b87f9c-82zlp\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373894 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-service-ca\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373922 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/12041e15-b6da-4d62-b434-ceb0e39480a6-serving-cert\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.373948 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b9194562-89b4-49cc-b0d2-7875fd2640d8-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374006 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/37186a9e-7601-41d6-9083-c2231119635d-trusted-ca\") pod \"console-operator-58897d9998-ck2fz\" (UID: \"37186a9e-7601-41d6-9083-c2231119635d\") " pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374029 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/d0c9cc0f-1a99-4138-a54b-e33f6ac83988-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-jwxvn\" (UID: \"d0c9cc0f-1a99-4138-a54b-e33f6ac83988\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374055 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-encryption-config\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374076 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sk4sv\" (UniqueName: \"kubernetes.io/projected/ed6873d6-2014-4326-bb4f-939fab37b01c-kube-api-access-sk4sv\") pod \"openshift-config-operator-7777fb866f-pz96p\" (UID: \"ed6873d6-2014-4326-bb4f-939fab37b01c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374102 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9b552109-8c1c-489b-9a57-028d5f24e462-auth-proxy-config\") pod \"machine-config-operator-74547568cd-ktdtg\" (UID: \"9b552109-8c1c-489b-9a57-028d5f24e462\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374165 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pc2s\" (UniqueName: \"kubernetes.io/projected/b859ccb0-eb52-4086-8db1-cf1543b934d9-kube-api-access-7pc2s\") pod \"downloads-7954f5f757-k2d98\" (UID: \"b859ccb0-eb52-4086-8db1-cf1543b934d9\") " pod="openshift-console/downloads-7954f5f757-k2d98" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374190 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b9194562-89b4-49cc-b0d2-7875fd2640d8-etcd-client\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc 
kubenswrapper[4910]: I0105 21:53:34.374216 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37186a9e-7601-41d6-9083-c2231119635d-config\") pod \"console-operator-58897d9998-ck2fz\" (UID: \"37186a9e-7601-41d6-9083-c2231119635d\") " pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374237 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-audit-policies\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374260 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-image-import-ca\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374346 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/56ef4f14-9acd-41fe-894d-5fbbe990da8e-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-4s99g\" (UID: \"56ef4f14-9acd-41fe-894d-5fbbe990da8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374364 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37186a9e-7601-41d6-9083-c2231119635d-serving-cert\") pod \"console-operator-58897d9998-ck2fz\" (UID: \"37186a9e-7601-41d6-9083-c2231119635d\") " pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374402 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374414 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374426 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgv6j\" (UniqueName: \"kubernetes.io/projected/12041e15-b6da-4d62-b434-ceb0e39480a6-kube-api-access-lgv6j\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374448 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/37a900bd-079a-4b57-a7e6-e12a71e50d2f-machine-approver-tls\") pod \"machine-approver-56656f9798-jh8zt\" (UID: \"37a900bd-079a-4b57-a7e6-e12a71e50d2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374475 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374515 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkffq\" (UniqueName: \"kubernetes.io/projected/37a900bd-079a-4b57-a7e6-e12a71e50d2f-kube-api-access-xkffq\") pod \"machine-approver-56656f9798-jh8zt\" (UID: \"37a900bd-079a-4b57-a7e6-e12a71e50d2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374533 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374551 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12041e15-b6da-4d62-b434-ceb0e39480a6-config\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374570 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/99fe5999-eb63-4256-885a-11e4e2023e30-metrics-tls\") pod \"dns-operator-744455d44c-w9k7n\" (UID: \"99fe5999-eb63-4256-885a-11e4e2023e30\") " pod="openshift-dns-operator/dns-operator-744455d44c-w9k7n" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374588 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 
21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374607 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42cd7d43-1bdf-4961-bcae-6f638a83b8e0-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-47djl\" (UID: \"42cd7d43-1bdf-4961-bcae-6f638a83b8e0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374625 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7958e921-b665-41a4-8989-5988e6082b50-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-5n2lb\" (UID: \"7958e921-b665-41a4-8989-5988e6082b50\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374646 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f414412-c2c0-4fea-a255-2444675c6f5e-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-44vj4\" (UID: \"4f414412-c2c0-4fea-a255-2444675c6f5e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374665 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374686 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9b552109-8c1c-489b-9a57-028d5f24e462-images\") pod \"machine-config-operator-74547568cd-ktdtg\" (UID: \"9b552109-8c1c-489b-9a57-028d5f24e462\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374703 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9b552109-8c1c-489b-9a57-028d5f24e462-proxy-tls\") pod \"machine-config-operator-74547568cd-ktdtg\" (UID: \"9b552109-8c1c-489b-9a57-028d5f24e462\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374723 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/37a900bd-079a-4b57-a7e6-e12a71e50d2f-auth-proxy-config\") pod \"machine-approver-56656f9798-jh8zt\" (UID: \"37a900bd-079a-4b57-a7e6-e12a71e50d2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374742 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374762 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-trusted-ca-bundle\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374780 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9194562-89b4-49cc-b0d2-7875fd2640d8-serving-cert\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374797 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3526640e-85a9-41f1-b79d-c31854227b25-config\") pod \"machine-api-operator-5694c8668f-6d5lf\" (UID: \"3526640e-85a9-41f1-b79d-c31854227b25\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374824 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d856811b-12c9-4b55-bf0d-3da687639b65-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-m9xnz\" (UID: \"d856811b-12c9-4b55-bf0d-3da687639b65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374844 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62777\" (UniqueName: \"kubernetes.io/projected/00fa008c-8f60-4ec4-ba95-a58e71658276-kube-api-access-62777\") pod \"openshift-apiserver-operator-796bbdcf4f-cbfc7\" (UID: \"00fa008c-8f60-4ec4-ba95-a58e71658276\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374866 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cczz9\" (UniqueName: \"kubernetes.io/projected/37186a9e-7601-41d6-9083-c2231119635d-kube-api-access-cczz9\") pod \"console-operator-58897d9998-ck2fz\" (UID: \"37186a9e-7601-41d6-9083-c2231119635d\") " pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374883 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e51587e-3444-440f-802a-347a93a869ad-serving-cert\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374899 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42cd7d43-1bdf-4961-bcae-6f638a83b8e0-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-47djl\" (UID: \"42cd7d43-1bdf-4961-bcae-6f638a83b8e0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374918 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f869ba01-9cc5-403c-a234-7a6e4864c8fb-serving-cert\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374935 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69e2d768-6b62-446e-a239-4b221ba0a979-serving-cert\") pod \"route-controller-manager-6576b87f9c-82zlp\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374953 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-node-pullsecrets\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374970 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/5e51587e-3444-440f-802a-347a93a869ad-etcd-ca\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374986 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12041e15-b6da-4d62-b434-ceb0e39480a6-service-ca-bundle\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375003 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppwtx\" (UniqueName: \"kubernetes.io/projected/3526640e-85a9-41f1-b79d-c31854227b25-kube-api-access-ppwtx\") pod \"machine-api-operator-5694c8668f-6d5lf\" (UID: \"3526640e-85a9-41f1-b79d-c31854227b25\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375020 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b9194562-89b4-49cc-b0d2-7875fd2640d8-audit-dir\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375037 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00fa008c-8f60-4ec4-ba95-a58e71658276-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-cbfc7\" (UID: \"00fa008c-8f60-4ec4-ba95-a58e71658276\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375058 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hf4vz\" (UniqueName: \"kubernetes.io/projected/5e51587e-3444-440f-802a-347a93a869ad-kube-api-access-hf4vz\") pod \"etcd-operator-b45778765-j9xtz\" 
(UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375079 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375079 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/3526640e-85a9-41f1-b79d-c31854227b25-images\") pod \"machine-api-operator-5694c8668f-6d5lf\" (UID: \"3526640e-85a9-41f1-b79d-c31854227b25\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375095 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-config\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375200 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12041e15-b6da-4d62-b434-ceb0e39480a6-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375260 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6g27\" (UniqueName: \"kubernetes.io/projected/9b552109-8c1c-489b-9a57-028d5f24e462-kube-api-access-t6g27\") pod \"machine-config-operator-74547568cd-ktdtg\" (UID: \"9b552109-8c1c-489b-9a57-028d5f24e462\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375315 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/b9194562-89b4-49cc-b0d2-7875fd2640d8-encryption-config\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375364 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e51587e-3444-440f-802a-347a93a869ad-config\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375417 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-oauth-config\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375480 4910 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-audit\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375516 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shhkp\" (UniqueName: \"kubernetes.io/projected/d856811b-12c9-4b55-bf0d-3da687639b65-kube-api-access-shhkp\") pod \"cluster-image-registry-operator-dc59b4c8b-m9xnz\" (UID: \"d856811b-12c9-4b55-bf0d-3da687639b65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375547 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375538 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-etcd-serving-ca\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375589 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ed6873d6-2014-4326-bb4f-939fab37b01c-available-featuregates\") pod \"openshift-config-operator-7777fb866f-pz96p\" (UID: \"ed6873d6-2014-4326-bb4f-939fab37b01c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375614 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7958e921-b665-41a4-8989-5988e6082b50-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-5n2lb\" (UID: \"7958e921-b665-41a4-8989-5988e6082b50\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375658 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-config\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375675 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00fa008c-8f60-4ec4-ba95-a58e71658276-config\") pod \"openshift-apiserver-operator-796bbdcf4f-cbfc7\" (UID: \"00fa008c-8f60-4ec4-ba95-a58e71658276\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375688 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/56ef4f14-9acd-41fe-894d-5fbbe990da8e-serving-cert\") pod 
\"kube-controller-manager-operator-78b949d7b-4s99g\" (UID: \"56ef4f14-9acd-41fe-894d-5fbbe990da8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375733 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/b9194562-89b4-49cc-b0d2-7875fd2640d8-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375757 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rs6qv\" (UniqueName: \"kubernetes.io/projected/aa805313-499f-47e9-8ffa-827fb2664a71-kube-api-access-rs6qv\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375774 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/69e2d768-6b62-446e-a239-4b221ba0a979-client-ca\") pod \"route-controller-manager-6576b87f9c-82zlp\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375810 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-audit-dir\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375813 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-config\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375827 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b9194562-89b4-49cc-b0d2-7875fd2640d8-audit-policies\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375849 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9t8t\" (UniqueName: \"kubernetes.io/projected/b9194562-89b4-49cc-b0d2-7875fd2640d8-kube-api-access-z9t8t\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375862 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/aa805313-499f-47e9-8ffa-827fb2664a71-audit-dir\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375885 4910 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed6873d6-2014-4326-bb4f-939fab37b01c-serving-cert\") pod \"openshift-config-operator-7777fb866f-pz96p\" (UID: \"ed6873d6-2014-4326-bb4f-939fab37b01c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375910 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9m87\" (UniqueName: \"kubernetes.io/projected/c14b4107-38b0-488e-a466-34fe6914f075-kube-api-access-b9m87\") pod \"migrator-59844c95c7-lj4dw\" (UID: \"c14b4107-38b0-488e-a466-34fe6914f075\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-lj4dw" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375932 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375972 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whqw7\" (UniqueName: \"kubernetes.io/projected/99fe5999-eb63-4256-885a-11e4e2023e30-kube-api-access-whqw7\") pod \"dns-operator-744455d44c-w9k7n\" (UID: \"99fe5999-eb63-4256-885a-11e4e2023e30\") " pod="openshift-dns-operator/dns-operator-744455d44c-w9k7n" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.375996 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376013 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-trusted-ca-bundle\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376046 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d856811b-12c9-4b55-bf0d-3da687639b65-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-m9xnz\" (UID: \"d856811b-12c9-4b55-bf0d-3da687639b65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376067 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bwx5\" (UniqueName: \"kubernetes.io/projected/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-kube-api-access-4bwx5\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376085 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-etcd-client\") pod 
\"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376159 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-k6pjp\" (UID: \"e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376177 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-client-ca\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376204 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkqmv\" (UniqueName: \"kubernetes.io/projected/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-kube-api-access-nkqmv\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376260 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/3526640e-85a9-41f1-b79d-c31854227b25-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-6d5lf\" (UID: \"3526640e-85a9-41f1-b79d-c31854227b25\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376289 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdlk2\" (UniqueName: \"kubernetes.io/projected/d0c9cc0f-1a99-4138-a54b-e33f6ac83988-kube-api-access-kdlk2\") pod \"package-server-manager-789f6589d5-jwxvn\" (UID: \"d0c9cc0f-1a99-4138-a54b-e33f6ac83988\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376338 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f414412-c2c0-4fea-a255-2444675c6f5e-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-44vj4\" (UID: \"4f414412-c2c0-4fea-a255-2444675c6f5e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376361 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmxxh\" (UniqueName: \"kubernetes.io/projected/69e2d768-6b62-446e-a239-4b221ba0a979-kube-api-access-zmxxh\") pod \"route-controller-manager-6576b87f9c-82zlp\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376380 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f414412-c2c0-4fea-a255-2444675c6f5e-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-44vj4\" (UID: 
\"4f414412-c2c0-4fea-a255-2444675c6f5e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376430 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37a900bd-079a-4b57-a7e6-e12a71e50d2f-config\") pod \"machine-approver-56656f9798-jh8zt\" (UID: \"37a900bd-079a-4b57-a7e6-e12a71e50d2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376451 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-k6pjp\" (UID: \"e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376493 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-oauth-serving-cert\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376757 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12041e15-b6da-4d62-b434-ceb0e39480a6-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376879 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-audit\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.377482 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-oauth-serving-cert\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.377506 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-etcd-serving-ca\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.377619 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ed6873d6-2014-4326-bb4f-939fab37b01c-available-featuregates\") pod \"openshift-config-operator-7777fb866f-pz96p\" (UID: \"ed6873d6-2014-4326-bb4f-939fab37b01c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.377800 4910 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.378081 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-audit-dir\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.378734 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b9194562-89b4-49cc-b0d2-7875fd2640d8-audit-policies\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.378788 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.378830 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.379642 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e51587e-3444-440f-802a-347a93a869ad-config\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.380009 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.380254 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-trusted-ca-bundle\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.380647 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/b9194562-89b4-49cc-b0d2-7875fd2640d8-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.381140 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/69e2d768-6b62-446e-a239-4b221ba0a979-client-ca\") pod \"route-controller-manager-6576b87f9c-82zlp\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:34 crc 
kubenswrapper[4910]: I0105 21:53:34.382240 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3526640e-85a9-41f1-b79d-c31854227b25-config\") pod \"machine-api-operator-5694c8668f-6d5lf\" (UID: \"3526640e-85a9-41f1-b79d-c31854227b25\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.376395 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/5e51587e-3444-440f-802a-347a93a869ad-etcd-service-ca\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.382924 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-service-ca\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.374834 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-config\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.383556 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b9194562-89b4-49cc-b0d2-7875fd2640d8-audit-dir\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.383731 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-node-pullsecrets\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.383780 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d856811b-12c9-4b55-bf0d-3da687639b65-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-m9xnz\" (UID: \"d856811b-12c9-4b55-bf0d-3da687639b65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.383871 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.384075 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42cd7d43-1bdf-4961-bcae-6f638a83b8e0-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-47djl\" (UID: \"42cd7d43-1bdf-4961-bcae-6f638a83b8e0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.380455 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-g5xxj"] Jan 05 21:53:34 crc 
kubenswrapper[4910]: I0105 21:53:34.384450 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-serving-cert\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.387730 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.388217 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.388781 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00fa008c-8f60-4ec4-ba95-a58e71658276-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-cbfc7\" (UID: \"00fa008c-8f60-4ec4-ba95-a58e71658276\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.388882 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/b9194562-89b4-49cc-b0d2-7875fd2640d8-encryption-config\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.389070 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-etcd-client\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.389253 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b9194562-89b4-49cc-b0d2-7875fd2640d8-etcd-client\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.390069 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.390204 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-serving-cert\") pod 
\"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.390370 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-client-ca\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.390448 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/3526640e-85a9-41f1-b79d-c31854227b25-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-6d5lf\" (UID: \"3526640e-85a9-41f1-b79d-c31854227b25\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.390583 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12041e15-b6da-4d62-b434-ceb0e39480a6-service-ca-bundle\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.390946 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37a900bd-079a-4b57-a7e6-e12a71e50d2f-config\") pod \"machine-approver-56656f9798-jh8zt\" (UID: \"37a900bd-079a-4b57-a7e6-e12a71e50d2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.390992 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/37a900bd-079a-4b57-a7e6-e12a71e50d2f-auth-proxy-config\") pod \"machine-approver-56656f9798-jh8zt\" (UID: \"37a900bd-079a-4b57-a7e6-e12a71e50d2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.391399 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-trusted-ca-bundle\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.391481 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b9194562-89b4-49cc-b0d2-7875fd2640d8-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.392068 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed6873d6-2014-4326-bb4f-939fab37b01c-serving-cert\") pod \"openshift-config-operator-7777fb866f-pz96p\" (UID: \"ed6873d6-2014-4326-bb4f-939fab37b01c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.392751 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-controller-manager/controller-manager-879f6c89f-8z8h7"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.393788 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-w9k7n"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.394557 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gqzj7"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.395483 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-sfnhj"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.395602 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9194562-89b4-49cc-b0d2-7875fd2640d8-serving-cert\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.396321 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/37a900bd-079a-4b57-a7e6-e12a71e50d2f-machine-approver-tls\") pod \"machine-approver-56656f9798-jh8zt\" (UID: \"37a900bd-079a-4b57-a7e6-e12a71e50d2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.397690 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-config\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.397794 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69e2d768-6b62-446e-a239-4b221ba0a979-config\") pod \"route-controller-manager-6576b87f9c-82zlp\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.397862 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d856811b-12c9-4b55-bf0d-3da687639b65-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-m9xnz\" (UID: \"d856811b-12c9-4b55-bf0d-3da687639b65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.397982 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.398028 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.398393 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.398497 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-encryption-config\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.398821 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/c897d56d-7140-4aae-b1df-288502d6c78c-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-9tbp2\" (UID: \"c897d56d-7140-4aae-b1df-288502d6c78c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.398874 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f869ba01-9cc5-403c-a234-7a6e4864c8fb-serving-cert\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.399052 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.399373 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-audit-policies\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.399515 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/5e51587e-3444-440f-802a-347a93a869ad-etcd-ca\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.399694 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.400643 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5e51587e-3444-440f-802a-347a93a869ad-etcd-client\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.401016 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-cliconfig\") pod 
\"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.401171 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e51587e-3444-440f-802a-347a93a869ad-serving-cert\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.401179 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.401290 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.402516 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.402643 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12041e15-b6da-4d62-b434-ceb0e39480a6-config\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.402966 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69e2d768-6b62-446e-a239-4b221ba0a979-serving-cert\") pod \"route-controller-manager-6576b87f9c-82zlp\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.403156 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-image-import-ca\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.403432 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.405438 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42cd7d43-1bdf-4961-bcae-6f638a83b8e0-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-47djl\" (UID: \"42cd7d43-1bdf-4961-bcae-6f638a83b8e0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl" Jan 05 21:53:34 crc 
kubenswrapper[4910]: I0105 21:53:34.405775 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/12041e15-b6da-4d62-b434-ceb0e39480a6-serving-cert\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.406041 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-k2d98"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.406808 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-oauth-config\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.407393 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-f56kd"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.409039 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-pz96p"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.411481 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-xg5fl"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.427479 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.429016 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-9pllm"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.431609 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-9pllm" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.431735 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-4qps7"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.432876 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-4qps7" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.433644 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-ck2fz"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.435647 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.437063 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.437920 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.438156 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-b7j29"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.440635 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.442256 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-j9xtz"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.443570 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-lj4dw"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.444617 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.446142 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.447892 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.448550 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.449874 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.451072 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.452233 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.453310 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-4lmhx"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.454508 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.455594 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-9pllm"] Jan 05 21:53:34 crc kubenswrapper[4910]: 
I0105 21:53:34.456633 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-6d5lf"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.458175 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.458185 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.458901 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.460050 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-68wbd"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.461113 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-llpdj"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.462091 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-4qps7"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.463209 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.464283 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-rzh7r"] Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.465137 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-rzh7r" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.478103 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/37186a9e-7601-41d6-9083-c2231119635d-trusted-ca\") pod \"console-operator-58897d9998-ck2fz\" (UID: \"37186a9e-7601-41d6-9083-c2231119635d\") " pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.478182 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.478198 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9b552109-8c1c-489b-9a57-028d5f24e462-auth-proxy-config\") pod \"machine-config-operator-74547568cd-ktdtg\" (UID: \"9b552109-8c1c-489b-9a57-028d5f24e462\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.478909 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9b552109-8c1c-489b-9a57-028d5f24e462-auth-proxy-config\") pod \"machine-config-operator-74547568cd-ktdtg\" (UID: \"9b552109-8c1c-489b-9a57-028d5f24e462\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.478991 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/d0c9cc0f-1a99-4138-a54b-e33f6ac83988-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-jwxvn\" (UID: \"d0c9cc0f-1a99-4138-a54b-e33f6ac83988\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479176 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37186a9e-7601-41d6-9083-c2231119635d-config\") pod \"console-operator-58897d9998-ck2fz\" (UID: \"37186a9e-7601-41d6-9083-c2231119635d\") " pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479211 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/56ef4f14-9acd-41fe-894d-5fbbe990da8e-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-4s99g\" (UID: \"56ef4f14-9acd-41fe-894d-5fbbe990da8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479235 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37186a9e-7601-41d6-9083-c2231119635d-serving-cert\") pod \"console-operator-58897d9998-ck2fz\" (UID: \"37186a9e-7601-41d6-9083-c2231119635d\") " pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479299 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/99fe5999-eb63-4256-885a-11e4e2023e30-metrics-tls\") pod \"dns-operator-744455d44c-w9k7n\" (UID: \"99fe5999-eb63-4256-885a-11e4e2023e30\") " pod="openshift-dns-operator/dns-operator-744455d44c-w9k7n" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479323 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7958e921-b665-41a4-8989-5988e6082b50-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-5n2lb\" (UID: \"7958e921-b665-41a4-8989-5988e6082b50\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479390 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9b552109-8c1c-489b-9a57-028d5f24e462-images\") pod \"machine-config-operator-74547568cd-ktdtg\" (UID: \"9b552109-8c1c-489b-9a57-028d5f24e462\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479421 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9b552109-8c1c-489b-9a57-028d5f24e462-proxy-tls\") pod \"machine-config-operator-74547568cd-ktdtg\" (UID: \"9b552109-8c1c-489b-9a57-028d5f24e462\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479461 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cczz9\" (UniqueName: \"kubernetes.io/projected/37186a9e-7601-41d6-9083-c2231119635d-kube-api-access-cczz9\") pod \"console-operator-58897d9998-ck2fz\" (UID: 
\"37186a9e-7601-41d6-9083-c2231119635d\") " pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479639 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6g27\" (UniqueName: \"kubernetes.io/projected/9b552109-8c1c-489b-9a57-028d5f24e462-kube-api-access-t6g27\") pod \"machine-config-operator-74547568cd-ktdtg\" (UID: \"9b552109-8c1c-489b-9a57-028d5f24e462\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479695 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7958e921-b665-41a4-8989-5988e6082b50-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-5n2lb\" (UID: \"7958e921-b665-41a4-8989-5988e6082b50\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479748 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/56ef4f14-9acd-41fe-894d-5fbbe990da8e-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-4s99g\" (UID: \"56ef4f14-9acd-41fe-894d-5fbbe990da8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479867 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9m87\" (UniqueName: \"kubernetes.io/projected/c14b4107-38b0-488e-a466-34fe6914f075-kube-api-access-b9m87\") pod \"migrator-59844c95c7-lj4dw\" (UID: \"c14b4107-38b0-488e-a466-34fe6914f075\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-lj4dw" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479918 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whqw7\" (UniqueName: \"kubernetes.io/projected/99fe5999-eb63-4256-885a-11e4e2023e30-kube-api-access-whqw7\") pod \"dns-operator-744455d44c-w9k7n\" (UID: \"99fe5999-eb63-4256-885a-11e4e2023e30\") " pod="openshift-dns-operator/dns-operator-744455d44c-w9k7n" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.479960 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-k6pjp\" (UID: \"e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.480014 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdlk2\" (UniqueName: \"kubernetes.io/projected/d0c9cc0f-1a99-4138-a54b-e33f6ac83988-kube-api-access-kdlk2\") pod \"package-server-manager-789f6589d5-jwxvn\" (UID: \"d0c9cc0f-1a99-4138-a54b-e33f6ac83988\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.480157 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-k6pjp\" (UID: 
\"e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.480218 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrkw\" (UniqueName: \"kubernetes.io/projected/e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c-kube-api-access-ncrkw\") pod \"kube-storage-version-migrator-operator-b67b599dd-k6pjp\" (UID: \"e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.480247 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7958e921-b665-41a4-8989-5988e6082b50-config\") pod \"kube-apiserver-operator-766d6c64bb-5n2lb\" (UID: \"7958e921-b665-41a4-8989-5988e6082b50\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.480292 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56ef4f14-9acd-41fe-894d-5fbbe990da8e-config\") pod \"kube-controller-manager-operator-78b949d7b-4s99g\" (UID: \"56ef4f14-9acd-41fe-894d-5fbbe990da8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.480359 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/55abc4f3-cea0-4cfc-9cb8-49c2be5598c1-proxy-tls\") pod \"machine-config-controller-84d6567774-8ncwl\" (UID: \"55abc4f3-cea0-4cfc-9cb8-49c2be5598c1\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.480898 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d5c787df-89be-4f93-8fae-b35d9bea1dfa-trusted-ca\") pod \"ingress-operator-5b745b69d9-zpbbr\" (UID: \"d5c787df-89be-4f93-8fae-b35d9bea1dfa\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.480937 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jfcs\" (UniqueName: \"kubernetes.io/projected/d5c787df-89be-4f93-8fae-b35d9bea1dfa-kube-api-access-7jfcs\") pod \"ingress-operator-5b745b69d9-zpbbr\" (UID: \"d5c787df-89be-4f93-8fae-b35d9bea1dfa\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.480972 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d5c787df-89be-4f93-8fae-b35d9bea1dfa-metrics-tls\") pod \"ingress-operator-5b745b69d9-zpbbr\" (UID: \"d5c787df-89be-4f93-8fae-b35d9bea1dfa\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.480993 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d5c787df-89be-4f93-8fae-b35d9bea1dfa-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zpbbr\" (UID: \"d5c787df-89be-4f93-8fae-b35d9bea1dfa\") " 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.481036 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/55abc4f3-cea0-4cfc-9cb8-49c2be5598c1-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-8ncwl\" (UID: \"55abc4f3-cea0-4cfc-9cb8-49c2be5598c1\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.481069 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tx24f\" (UniqueName: \"kubernetes.io/projected/55abc4f3-cea0-4cfc-9cb8-49c2be5598c1-kube-api-access-tx24f\") pod \"machine-config-controller-84d6567774-8ncwl\" (UID: \"55abc4f3-cea0-4cfc-9cb8-49c2be5598c1\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.482076 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/55abc4f3-cea0-4cfc-9cb8-49c2be5598c1-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-8ncwl\" (UID: \"55abc4f3-cea0-4cfc-9cb8-49c2be5598c1\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.484146 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/55abc4f3-cea0-4cfc-9cb8-49c2be5598c1-proxy-tls\") pod \"machine-config-controller-84d6567774-8ncwl\" (UID: \"55abc4f3-cea0-4cfc-9cb8-49c2be5598c1\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.498721 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.518066 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.520430 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9b552109-8c1c-489b-9a57-028d5f24e462-images\") pod \"machine-config-operator-74547568cd-ktdtg\" (UID: \"9b552109-8c1c-489b-9a57-028d5f24e462\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.538606 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.558652 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.564743 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9b552109-8c1c-489b-9a57-028d5f24e462-proxy-tls\") pod \"machine-config-operator-74547568cd-ktdtg\" (UID: \"9b552109-8c1c-489b-9a57-028d5f24e462\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.598763 4910 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.618984 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.639687 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.644206 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f414412-c2c0-4fea-a255-2444675c6f5e-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-44vj4\" (UID: \"4f414412-c2c0-4fea-a255-2444675c6f5e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.658461 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.666559 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f414412-c2c0-4fea-a255-2444675c6f5e-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-44vj4\" (UID: \"4f414412-c2c0-4fea-a255-2444675c6f5e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.679198 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.682576 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.682842 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.682886 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.683662 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:34 
crc kubenswrapper[4910]: I0105 21:53:34.684053 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:34 crc kubenswrapper[4910]: E0105 21:53:34.686656 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:55:36.686585542 +0000 UTC m=+268.264083212 (durationBeforeRetry 2m2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.691291 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.691724 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.695671 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.698013 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.700583 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.718427 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.739259 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 05 21:53:34 crc 
kubenswrapper[4910]: I0105 21:53:34.740238 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37186a9e-7601-41d6-9083-c2231119635d-config\") pod \"console-operator-58897d9998-ck2fz\" (UID: \"37186a9e-7601-41d6-9083-c2231119635d\") " pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.759207 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.763254 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37186a9e-7601-41d6-9083-c2231119635d-serving-cert\") pod \"console-operator-58897d9998-ck2fz\" (UID: \"37186a9e-7601-41d6-9083-c2231119635d\") " pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.788904 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.799705 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/37186a9e-7601-41d6-9083-c2231119635d-trusted-ca\") pod \"console-operator-58897d9998-ck2fz\" (UID: \"37186a9e-7601-41d6-9083-c2231119635d\") " pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.801397 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.820262 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.839314 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.844785 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-k6pjp\" (UID: \"e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.858501 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.878367 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.881246 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-k6pjp\" (UID: \"e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.899500 4910 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.904031 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/56ef4f14-9acd-41fe-894d-5fbbe990da8e-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-4s99g\" (UID: \"56ef4f14-9acd-41fe-894d-5fbbe990da8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.938759 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.939694 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.959724 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.963620 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.978446 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.984082 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 05 21:53:34 crc kubenswrapper[4910]: I0105 21:53:34.988575 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:34.998874 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.006181 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d5c787df-89be-4f93-8fae-b35d9bea1dfa-metrics-tls\") pod \"ingress-operator-5b745b69d9-zpbbr\" (UID: \"d5c787df-89be-4f93-8fae-b35d9bea1dfa\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.027743 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.033422 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d5c787df-89be-4f93-8fae-b35d9bea1dfa-trusted-ca\") pod \"ingress-operator-5b745b69d9-zpbbr\" (UID: \"d5c787df-89be-4f93-8fae-b35d9bea1dfa\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.039290 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.058935 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.079845 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.101403 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.120223 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.122533 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7958e921-b665-41a4-8989-5988e6082b50-config\") pod \"kube-apiserver-operator-766d6c64bb-5n2lb\" (UID: \"7958e921-b665-41a4-8989-5988e6082b50\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.138506 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.159513 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.166237 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7958e921-b665-41a4-8989-5988e6082b50-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-5n2lb\" (UID: \"7958e921-b665-41a4-8989-5988e6082b50\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.180289 4910 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.198815 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.219370 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.245852 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 05 21:53:35 crc kubenswrapper[4910]: W0105 21:53:35.253856 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-a29bad354aa39e7c53706e6e813d7984b98ad114e0fd44c57af793da92b8a8b7 WatchSource:0}: Error finding container a29bad354aa39e7c53706e6e813d7984b98ad114e0fd44c57af793da92b8a8b7: Status 404 returned error can't find the container with id a29bad354aa39e7c53706e6e813d7984b98ad114e0fd44c57af793da92b8a8b7 Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.258208 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.278697 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 05 21:53:35 crc kubenswrapper[4910]: W0105 21:53:35.281103 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-1de4444c8e1ff86bb7b4828e2234a98c1863ee105eb96515fbb45fdadc9a5853 WatchSource:0}: Error finding container 1de4444c8e1ff86bb7b4828e2234a98c1863ee105eb96515fbb45fdadc9a5853: Status 404 returned error can't find the container with id 1de4444c8e1ff86bb7b4828e2234a98c1863ee105eb96515fbb45fdadc9a5853 Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.298186 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.301787 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56ef4f14-9acd-41fe-894d-5fbbe990da8e-config\") pod \"kube-controller-manager-operator-78b949d7b-4s99g\" (UID: \"56ef4f14-9acd-41fe-894d-5fbbe990da8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.320005 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.339172 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.345492 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/99fe5999-eb63-4256-885a-11e4e2023e30-metrics-tls\") pod \"dns-operator-744455d44c-w9k7n\" (UID: \"99fe5999-eb63-4256-885a-11e4e2023e30\") " pod="openshift-dns-operator/dns-operator-744455d44c-w9k7n" Jan 05 
21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.356535 4910 request.go:700] Waited for 1.010149591s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-dns-operator/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.359946 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.379397 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.399215 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.406800 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/d0c9cc0f-1a99-4138-a54b-e33f6ac83988-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-jwxvn\" (UID: \"d0c9cc0f-1a99-4138-a54b-e33f6ac83988\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.419045 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.438453 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.458721 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.498285 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.506184 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"a36f4c0d6fb3310b37dbc3d3c361ba3c58dc2746b4ff900dad7b75a1961c3171"} Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.506256 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"ade3cf98247a9af341d0f67539fd68a6927e23ec183f814549ed0ed483d3759e"} Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.507870 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"68d2ecabeda4fcc57dd14cdc43cb4a343a75dc9c6c9ba89c5cd78fdf6a3beca7"} Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.507953 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"1de4444c8e1ff86bb7b4828e2234a98c1863ee105eb96515fbb45fdadc9a5853"} 
Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.509168 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"7316b41daf8f7118e12443a12550bcec4b87b67b675d7e2fc564500dff2562a0"} Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.509221 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"a29bad354aa39e7c53706e6e813d7984b98ad114e0fd44c57af793da92b8a8b7"} Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.509358 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.518398 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.538541 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.559667 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.578809 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.598735 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.618887 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.638366 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.666443 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.678249 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.698532 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.719520 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.739304 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.759287 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.780547 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 
21:53:35.799047 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.818491 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.838634 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.858794 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.898558 4910 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.910776 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bk7j\" (UniqueName: \"kubernetes.io/projected/f869ba01-9cc5-403c-a234-7a6e4864c8fb-kube-api-access-9bk7j\") pod \"controller-manager-879f6c89f-8z8h7\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.919890 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.966570 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc658\" (UniqueName: \"kubernetes.io/projected/c897d56d-7140-4aae-b1df-288502d6c78c-kube-api-access-vc658\") pod \"cluster-samples-operator-665b6dd947-9tbp2\" (UID: \"c897d56d-7140-4aae-b1df-288502d6c78c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.979170 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.979745 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d856811b-12c9-4b55-bf0d-3da687639b65-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-m9xnz\" (UID: \"d856811b-12c9-4b55-bf0d-3da687639b65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:35 crc kubenswrapper[4910]: I0105 21:53:35.999705 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.019594 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.038605 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.050436 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.059913 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.084030 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.097598 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xx7h\" (UniqueName: \"kubernetes.io/projected/42cd7d43-1bdf-4961-bcae-6f638a83b8e0-kube-api-access-7xx7h\") pod \"openshift-controller-manager-operator-756b6f6bc6-47djl\" (UID: \"42cd7d43-1bdf-4961-bcae-6f638a83b8e0\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.152517 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rs6qv\" (UniqueName: \"kubernetes.io/projected/aa805313-499f-47e9-8ffa-827fb2664a71-kube-api-access-rs6qv\") pod \"oauth-openshift-558db77b4-gqzj7\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.154768 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.159435 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkqmv\" (UniqueName: \"kubernetes.io/projected/9f9b9ae8-cbde-41e1-b829-c1511c7f2973-kube-api-access-nkqmv\") pod \"apiserver-76f77b778f-f56kd\" (UID: \"9f9b9ae8-cbde-41e1-b829-c1511c7f2973\") " pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.166234 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shhkp\" (UniqueName: \"kubernetes.io/projected/d856811b-12c9-4b55-bf0d-3da687639b65-kube-api-access-shhkp\") pod \"cluster-image-registry-operator-dc59b4c8b-m9xnz\" (UID: \"d856811b-12c9-4b55-bf0d-3da687639b65\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.188373 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f414412-c2c0-4fea-a255-2444675c6f5e-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-44vj4\" (UID: \"4f414412-c2c0-4fea-a255-2444675c6f5e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.203207 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9t8t\" (UniqueName: \"kubernetes.io/projected/b9194562-89b4-49cc-b0d2-7875fd2640d8-kube-api-access-z9t8t\") pod \"apiserver-7bbb656c7d-bb2c8\" (UID: \"b9194562-89b4-49cc-b0d2-7875fd2640d8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.233515 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.234040 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmxxh\" (UniqueName: \"kubernetes.io/projected/69e2d768-6b62-446e-a239-4b221ba0a979-kube-api-access-zmxxh\") pod \"route-controller-manager-6576b87f9c-82zlp\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.234208 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bwx5\" (UniqueName: \"kubernetes.io/projected/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-kube-api-access-4bwx5\") pod \"console-f9d7485db-g5xxj\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.243524 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.246486 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.255796 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.264757 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62777\" (UniqueName: \"kubernetes.io/projected/00fa008c-8f60-4ec4-ba95-a58e71658276-kube-api-access-62777\") pod \"openshift-apiserver-operator-796bbdcf4f-cbfc7\" (UID: \"00fa008c-8f60-4ec4-ba95-a58e71658276\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.293796 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppwtx\" (UniqueName: \"kubernetes.io/projected/3526640e-85a9-41f1-b79d-c31854227b25-kube-api-access-ppwtx\") pod \"machine-api-operator-5694c8668f-6d5lf\" (UID: \"3526640e-85a9-41f1-b79d-c31854227b25\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.304388 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.327539 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hf4vz\" (UniqueName: \"kubernetes.io/projected/5e51587e-3444-440f-802a-347a93a869ad-kube-api-access-hf4vz\") pod \"etcd-operator-b45778765-j9xtz\" (UID: \"5e51587e-3444-440f-802a-347a93a869ad\") " pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.338621 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sk4sv\" (UniqueName: \"kubernetes.io/projected/ed6873d6-2014-4326-bb4f-939fab37b01c-kube-api-access-sk4sv\") pod \"openshift-config-operator-7777fb866f-pz96p\" (UID: \"ed6873d6-2014-4326-bb4f-939fab37b01c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.345009 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pc2s\" (UniqueName: \"kubernetes.io/projected/b859ccb0-eb52-4086-8db1-cf1543b934d9-kube-api-access-7pc2s\") pod \"downloads-7954f5f757-k2d98\" (UID: \"b859ccb0-eb52-4086-8db1-cf1543b934d9\") " pod="openshift-console/downloads-7954f5f757-k2d98" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.359545 4910 request.go:700] Waited for 1.971986599s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication-operator/serviceaccounts/authentication-operator/token Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.365558 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.369101 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkffq\" (UniqueName: \"kubernetes.io/projected/37a900bd-079a-4b57-a7e6-e12a71e50d2f-kube-api-access-xkffq\") pod \"machine-approver-56656f9798-jh8zt\" (UID: \"37a900bd-079a-4b57-a7e6-e12a71e50d2f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.379670 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.384095 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgv6j\" (UniqueName: \"kubernetes.io/projected/12041e15-b6da-4d62-b434-ceb0e39480a6-kube-api-access-lgv6j\") pod \"authentication-operator-69f744f599-sfnhj\" (UID: \"12041e15-b6da-4d62-b434-ceb0e39480a6\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.397663 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-f56kd" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.400001 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.400503 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-8z8h7"] Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.420030 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.422384 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2"] Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.437852 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.440613 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.458892 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.460610 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.469373 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-k2d98" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.474382 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.478752 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.505156 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 05 21:53:36 crc kubenswrapper[4910]: W0105 21:53:36.509774 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf869ba01_9cc5_403c_a234_7a6e4864c8fb.slice/crio-15dfeafb5af7724704b54b6f3131908006772438d52ac5c7d72479889a54d0af WatchSource:0}: Error finding container 15dfeafb5af7724704b54b6f3131908006772438d52ac5c7d72479889a54d0af: Status 404 returned error can't find the container with id 15dfeafb5af7724704b54b6f3131908006772438d52ac5c7d72479889a54d0af Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.517853 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" event={"ID":"f869ba01-9cc5-403c-a234-7a6e4864c8fb","Type":"ContainerStarted","Data":"15dfeafb5af7724704b54b6f3131908006772438d52ac5c7d72479889a54d0af"} Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.518501 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.540637 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.559028 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.561631 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.590309 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz"] Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.590365 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.605328 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/56ef4f14-9acd-41fe-894d-5fbbe990da8e-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-4s99g\" (UID: \"56ef4f14-9acd-41fe-894d-5fbbe990da8e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.605695 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.614454 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cczz9\" (UniqueName: \"kubernetes.io/projected/37186a9e-7601-41d6-9083-c2231119635d-kube-api-access-cczz9\") pod \"console-operator-58897d9998-ck2fz\" (UID: \"37186a9e-7601-41d6-9083-c2231119635d\") " pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.624101 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.626312 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-g5xxj"] Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.637676 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6g27\" (UniqueName: \"kubernetes.io/projected/9b552109-8c1c-489b-9a57-028d5f24e462-kube-api-access-t6g27\") pod \"machine-config-operator-74547568cd-ktdtg\" (UID: \"9b552109-8c1c-489b-9a57-028d5f24e462\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.660248 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7958e921-b665-41a4-8989-5988e6082b50-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-5n2lb\" (UID: \"7958e921-b665-41a4-8989-5988e6082b50\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.674602 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9m87\" (UniqueName: \"kubernetes.io/projected/c14b4107-38b0-488e-a466-34fe6914f075-kube-api-access-b9m87\") pod \"migrator-59844c95c7-lj4dw\" (UID: \"c14b4107-38b0-488e-a466-34fe6914f075\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-lj4dw" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.700135 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whqw7\" (UniqueName: \"kubernetes.io/projected/99fe5999-eb63-4256-885a-11e4e2023e30-kube-api-access-whqw7\") pod \"dns-operator-744455d44c-w9k7n\" (UID: \"99fe5999-eb63-4256-885a-11e4e2023e30\") " pod="openshift-dns-operator/dns-operator-744455d44c-w9k7n" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.719937 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gqzj7"] Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.724961 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdlk2\" (UniqueName: \"kubernetes.io/projected/d0c9cc0f-1a99-4138-a54b-e33f6ac83988-kube-api-access-kdlk2\") pod \"package-server-manager-789f6589d5-jwxvn\" (UID: \"d0c9cc0f-1a99-4138-a54b-e33f6ac83988\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.742500 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncrkw\" (UniqueName: \"kubernetes.io/projected/e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c-kube-api-access-ncrkw\") pod 
\"kube-storage-version-migrator-operator-b67b599dd-k6pjp\" (UID: \"e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.772395 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jfcs\" (UniqueName: \"kubernetes.io/projected/d5c787df-89be-4f93-8fae-b35d9bea1dfa-kube-api-access-7jfcs\") pod \"ingress-operator-5b745b69d9-zpbbr\" (UID: \"d5c787df-89be-4f93-8fae-b35d9bea1dfa\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.794471 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d5c787df-89be-4f93-8fae-b35d9bea1dfa-bound-sa-token\") pod \"ingress-operator-5b745b69d9-zpbbr\" (UID: \"d5c787df-89be-4f93-8fae-b35d9bea1dfa\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.807392 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tx24f\" (UniqueName: \"kubernetes.io/projected/55abc4f3-cea0-4cfc-9cb8-49c2be5598c1-kube-api-access-tx24f\") pod \"machine-config-controller-84d6567774-8ncwl\" (UID: \"55abc4f3-cea0-4cfc-9cb8-49c2be5598c1\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.850142 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-stats-auth\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.856278 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-service-ca-bundle\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.856682 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-9s4dd\" (UID: \"5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.856796 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ldcv\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-kube-api-access-4ldcv\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.856930 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-trusted-ca\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.857031 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-installation-pull-secrets\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.857183 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-metrics-certs\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.857339 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfs5k\" (UniqueName: \"kubernetes.io/projected/5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf-kube-api-access-pfs5k\") pod \"control-plane-machine-set-operator-78cbb6b69f-9s4dd\" (UID: \"5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.857391 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-bound-sa-token\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.857416 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-default-certificate\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.857439 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-registry-certificates\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.857492 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-ca-trust-extracted\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.857527 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.857564 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-registry-tls\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.857607 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4kb4\" (UniqueName: \"kubernetes.io/projected/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-kube-api-access-s4kb4\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:36 crc kubenswrapper[4910]: E0105 21:53:36.858893 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:37.358882647 +0000 UTC m=+148.936380317 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.877675 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.884776 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.891832 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl"] Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.892704 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp"] Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.916330 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.917072 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.933642 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.941087 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-lj4dw" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.944433 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-f56kd"] Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.949080 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.958778 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.958971 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-bound-sa-token\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.959007 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-csi-data-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:36 crc kubenswrapper[4910]: E0105 21:53:36.959084 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:37.459054385 +0000 UTC m=+149.036552055 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.959185 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-default-certificate\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.959233 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-registry-certificates\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.959262 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ab37abef-211d-4733-8756-84bf4c9f9655-srv-cert\") pod \"catalog-operator-68c6474976-dflsj\" (UID: \"ab37abef-211d-4733-8756-84bf4c9f9655\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.959453 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-ca-trust-extracted\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.959477 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdbc5\" (UniqueName: \"kubernetes.io/projected/70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a-kube-api-access-zdbc5\") pod \"olm-operator-6b444d44fb-4zjkp\" (UID: \"70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.959497 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9l2wj\" (UniqueName: \"kubernetes.io/projected/11fe188c-97f7-4e0c-a84f-d95edc5f5404-kube-api-access-9l2wj\") pod \"service-ca-operator-777779d784-4hrg2\" (UID: \"11fe188c-97f7-4e0c-a84f-d95edc5f5404\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.959563 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.959661 
4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndzlm\" (UniqueName: \"kubernetes.io/projected/4f960de5-cb6c-4b0f-84fb-969264facb30-kube-api-access-ndzlm\") pod \"machine-config-server-rzh7r\" (UID: \"4f960de5-cb6c-4b0f-84fb-969264facb30\") " pod="openshift-machine-config-operator/machine-config-server-rzh7r" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.959746 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-registry-tls\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.959818 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/11fe188c-97f7-4e0c-a84f-d95edc5f5404-serving-cert\") pod \"service-ca-operator-777779d784-4hrg2\" (UID: \"11fe188c-97f7-4e0c-a84f-d95edc5f5404\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.964673 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-ca-trust-extracted\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: E0105 21:53:36.965768 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:37.46575082 +0000 UTC m=+149.043248490 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.967387 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2n7t\" (UniqueName: \"kubernetes.io/projected/ab37abef-211d-4733-8756-84bf4c9f9655-kube-api-access-w2n7t\") pod \"catalog-operator-68c6474976-dflsj\" (UID: \"ab37abef-211d-4733-8756-84bf4c9f9655\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.967457 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cqtx\" (UniqueName: \"kubernetes.io/projected/fcd4f024-3377-4bda-8dfd-bced91254447-kube-api-access-8cqtx\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.967489 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nn99r\" (UniqueName: \"kubernetes.io/projected/9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028-kube-api-access-nn99r\") pod \"service-ca-9c57cc56f-68wbd\" (UID: \"9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028\") " pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.967773 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4kb4\" (UniqueName: \"kubernetes.io/projected/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-kube-api-access-s4kb4\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.968388 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3b17c4a3-a1fa-491b-ba02-fb1de889551b-metrics-tls\") pod \"dns-default-9pllm\" (UID: \"3b17c4a3-a1fa-491b-ba02-fb1de889551b\") " pod="openshift-dns/dns-default-9pllm" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.968633 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kj64f\" (UniqueName: \"kubernetes.io/projected/8f0966dc-62e7-4ee6-827c-f3435f1d5d9b-kube-api-access-kj64f\") pod \"ingress-canary-4qps7\" (UID: \"8f0966dc-62e7-4ee6-827c-f3435f1d5d9b\") " pod="openshift-ingress-canary/ingress-canary-4qps7" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.968806 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65779\" (UniqueName: \"kubernetes.io/projected/3b17c4a3-a1fa-491b-ba02-fb1de889551b-kube-api-access-65779\") pod \"dns-default-9pllm\" (UID: \"3b17c4a3-a1fa-491b-ba02-fb1de889551b\") " pod="openshift-dns/dns-default-9pllm" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.968937 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-s28cc\" (UniqueName: \"kubernetes.io/projected/c3f548c3-cff0-49a6-a800-78732bc54c37-kube-api-access-s28cc\") pod \"packageserver-d55dfcdfc-n8vrv\" (UID: \"c3f548c3-cff0-49a6-a800-78732bc54c37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.969071 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-stats-auth\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.969236 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85zxv\" (UniqueName: \"kubernetes.io/projected/738f7ea4-3ce0-44b3-8757-1ad261e59de3-kube-api-access-85zxv\") pod \"multus-admission-controller-857f4d67dd-b7j29\" (UID: \"738f7ea4-3ce0-44b3-8757-1ad261e59de3\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-b7j29" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.969740 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11fe188c-97f7-4e0c-a84f-d95edc5f5404-config\") pod \"service-ca-operator-777779d784-4hrg2\" (UID: \"11fe188c-97f7-4e0c-a84f-d95edc5f5404\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.969889 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee40a1b4-967e-40aa-b6c0-eaf211346941-config-volume\") pod \"collect-profiles-29460825-hv5kn\" (UID: \"ee40a1b4-967e-40aa-b6c0-eaf211346941\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.970611 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-registry-certificates\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.971333 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-socket-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.972107 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/df73d562-aee4-4b56-b241-bd31f5c95714-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-llpdj\" (UID: \"df73d562-aee4-4b56-b241-bd31f5c95714\") " pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.972243 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tx5p5\" (UniqueName: 
\"kubernetes.io/projected/ee40a1b4-967e-40aa-b6c0-eaf211346941-kube-api-access-tx5p5\") pod \"collect-profiles-29460825-hv5kn\" (UID: \"ee40a1b4-967e-40aa-b6c0-eaf211346941\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.972905 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ab37abef-211d-4733-8756-84bf4c9f9655-profile-collector-cert\") pod \"catalog-operator-68c6474976-dflsj\" (UID: \"ab37abef-211d-4733-8756-84bf4c9f9655\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973010 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/4f960de5-cb6c-4b0f-84fb-969264facb30-certs\") pod \"machine-config-server-rzh7r\" (UID: \"4f960de5-cb6c-4b0f-84fb-969264facb30\") " pod="openshift-machine-config-operator/machine-config-server-rzh7r" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973036 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4zjkp\" (UID: \"70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973053 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/738f7ea4-3ce0-44b3-8757-1ad261e59de3-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-b7j29\" (UID: \"738f7ea4-3ce0-44b3-8757-1ad261e59de3\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-b7j29" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973051 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-default-certificate\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973090 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-service-ca-bundle\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973135 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/df73d562-aee4-4b56-b241-bd31f5c95714-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-llpdj\" (UID: \"df73d562-aee4-4b56-b241-bd31f5c95714\") " pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973174 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-mountpoint-dir\") pod 
\"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973199 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-9s4dd\" (UID: \"5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973233 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ldcv\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-kube-api-access-4ldcv\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973254 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028-signing-cabundle\") pod \"service-ca-9c57cc56f-68wbd\" (UID: \"9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028\") " pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973278 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a-srv-cert\") pod \"olm-operator-6b444d44fb-4zjkp\" (UID: \"70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973300 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/c3f548c3-cff0-49a6-a800-78732bc54c37-tmpfs\") pod \"packageserver-d55dfcdfc-n8vrv\" (UID: \"c3f548c3-cff0-49a6-a800-78732bc54c37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973338 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-trusted-ca\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973353 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee40a1b4-967e-40aa-b6c0-eaf211346941-secret-volume\") pod \"collect-profiles-29460825-hv5kn\" (UID: \"ee40a1b4-967e-40aa-b6c0-eaf211346941\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973369 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c3f548c3-cff0-49a6-a800-78732bc54c37-webhook-cert\") pod \"packageserver-d55dfcdfc-n8vrv\" (UID: \"c3f548c3-cff0-49a6-a800-78732bc54c37\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973419 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-plugins-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973436 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3b17c4a3-a1fa-491b-ba02-fb1de889551b-config-volume\") pod \"dns-default-9pllm\" (UID: \"3b17c4a3-a1fa-491b-ba02-fb1de889551b\") " pod="openshift-dns/dns-default-9pllm" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.973460 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-metrics-certs\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.974793 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-installation-pull-secrets\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.974857 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-registration-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.974888 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/4f960de5-cb6c-4b0f-84fb-969264facb30-node-bootstrap-token\") pod \"machine-config-server-rzh7r\" (UID: \"4f960de5-cb6c-4b0f-84fb-969264facb30\") " pod="openshift-machine-config-operator/machine-config-server-rzh7r" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.974970 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8f0966dc-62e7-4ee6-827c-f3435f1d5d9b-cert\") pod \"ingress-canary-4qps7\" (UID: \"8f0966dc-62e7-4ee6-827c-f3435f1d5d9b\") " pod="openshift-ingress-canary/ingress-canary-4qps7" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.975015 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dc5zf\" (UniqueName: \"kubernetes.io/projected/df73d562-aee4-4b56-b241-bd31f5c95714-kube-api-access-dc5zf\") pod \"marketplace-operator-79b997595-llpdj\" (UID: \"df73d562-aee4-4b56-b241-bd31f5c95714\") " pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.975974 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-trusted-ca\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.976223 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-stats-auth\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.976409 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-registry-tls\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.976469 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c3f548c3-cff0-49a6-a800-78732bc54c37-apiservice-cert\") pod \"packageserver-d55dfcdfc-n8vrv\" (UID: \"c3f548c3-cff0-49a6-a800-78732bc54c37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.976715 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028-signing-key\") pod \"service-ca-9c57cc56f-68wbd\" (UID: \"9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028\") " pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.977037 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfs5k\" (UniqueName: \"kubernetes.io/projected/5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf-kube-api-access-pfs5k\") pod \"control-plane-machine-set-operator-78cbb6b69f-9s4dd\" (UID: \"5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.998622 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-w9k7n" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.998723 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-installation-pull-secrets\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:36 crc kubenswrapper[4910]: I0105 21:53:36.998975 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-9s4dd\" (UID: \"5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.000215 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.001185 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-metrics-certs\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.003045 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8"] Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.003087 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4"] Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.004716 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-service-ca-bundle\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.015842 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-bound-sa-token\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.025976 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4kb4\" (UniqueName: \"kubernetes.io/projected/3b0afa0a-d1fe-4c63-a25f-3fd39b954817-kube-api-access-s4kb4\") pod \"router-default-5444994796-fcms5\" (UID: \"3b0afa0a-d1fe-4c63-a25f-3fd39b954817\") " pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.062099 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ldcv\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-kube-api-access-4ldcv\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.075916 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfs5k\" (UniqueName: \"kubernetes.io/projected/5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf-kube-api-access-pfs5k\") pod \"control-plane-machine-set-operator-78cbb6b69f-9s4dd\" (UID: \"5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.078911 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:37 crc kubenswrapper[4910]: E0105 21:53:37.079099 4910 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:37.57905172 +0000 UTC m=+149.156549390 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079185 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-registration-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079232 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/4f960de5-cb6c-4b0f-84fb-969264facb30-node-bootstrap-token\") pod \"machine-config-server-rzh7r\" (UID: \"4f960de5-cb6c-4b0f-84fb-969264facb30\") " pod="openshift-machine-config-operator/machine-config-server-rzh7r" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079258 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8f0966dc-62e7-4ee6-827c-f3435f1d5d9b-cert\") pod \"ingress-canary-4qps7\" (UID: \"8f0966dc-62e7-4ee6-827c-f3435f1d5d9b\") " pod="openshift-ingress-canary/ingress-canary-4qps7" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079282 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dc5zf\" (UniqueName: \"kubernetes.io/projected/df73d562-aee4-4b56-b241-bd31f5c95714-kube-api-access-dc5zf\") pod \"marketplace-operator-79b997595-llpdj\" (UID: \"df73d562-aee4-4b56-b241-bd31f5c95714\") " pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079311 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c3f548c3-cff0-49a6-a800-78732bc54c37-apiservice-cert\") pod \"packageserver-d55dfcdfc-n8vrv\" (UID: \"c3f548c3-cff0-49a6-a800-78732bc54c37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079332 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028-signing-key\") pod \"service-ca-9c57cc56f-68wbd\" (UID: \"9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028\") " pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079360 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-csi-data-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:37 crc 
kubenswrapper[4910]: I0105 21:53:37.079387 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ab37abef-211d-4733-8756-84bf4c9f9655-srv-cert\") pod \"catalog-operator-68c6474976-dflsj\" (UID: \"ab37abef-211d-4733-8756-84bf4c9f9655\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079439 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdbc5\" (UniqueName: \"kubernetes.io/projected/70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a-kube-api-access-zdbc5\") pod \"olm-operator-6b444d44fb-4zjkp\" (UID: \"70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079460 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9l2wj\" (UniqueName: \"kubernetes.io/projected/11fe188c-97f7-4e0c-a84f-d95edc5f5404-kube-api-access-9l2wj\") pod \"service-ca-operator-777779d784-4hrg2\" (UID: \"11fe188c-97f7-4e0c-a84f-d95edc5f5404\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079497 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079520 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndzlm\" (UniqueName: \"kubernetes.io/projected/4f960de5-cb6c-4b0f-84fb-969264facb30-kube-api-access-ndzlm\") pod \"machine-config-server-rzh7r\" (UID: \"4f960de5-cb6c-4b0f-84fb-969264facb30\") " pod="openshift-machine-config-operator/machine-config-server-rzh7r" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079555 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/11fe188c-97f7-4e0c-a84f-d95edc5f5404-serving-cert\") pod \"service-ca-operator-777779d784-4hrg2\" (UID: \"11fe188c-97f7-4e0c-a84f-d95edc5f5404\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079581 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2n7t\" (UniqueName: \"kubernetes.io/projected/ab37abef-211d-4733-8756-84bf4c9f9655-kube-api-access-w2n7t\") pod \"catalog-operator-68c6474976-dflsj\" (UID: \"ab37abef-211d-4733-8756-84bf4c9f9655\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079606 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cqtx\" (UniqueName: \"kubernetes.io/projected/fcd4f024-3377-4bda-8dfd-bced91254447-kube-api-access-8cqtx\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079631 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-nn99r\" (UniqueName: \"kubernetes.io/projected/9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028-kube-api-access-nn99r\") pod \"service-ca-9c57cc56f-68wbd\" (UID: \"9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028\") " pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079676 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3b17c4a3-a1fa-491b-ba02-fb1de889551b-metrics-tls\") pod \"dns-default-9pllm\" (UID: \"3b17c4a3-a1fa-491b-ba02-fb1de889551b\") " pod="openshift-dns/dns-default-9pllm" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079704 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kj64f\" (UniqueName: \"kubernetes.io/projected/8f0966dc-62e7-4ee6-827c-f3435f1d5d9b-kube-api-access-kj64f\") pod \"ingress-canary-4qps7\" (UID: \"8f0966dc-62e7-4ee6-827c-f3435f1d5d9b\") " pod="openshift-ingress-canary/ingress-canary-4qps7" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079723 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65779\" (UniqueName: \"kubernetes.io/projected/3b17c4a3-a1fa-491b-ba02-fb1de889551b-kube-api-access-65779\") pod \"dns-default-9pllm\" (UID: \"3b17c4a3-a1fa-491b-ba02-fb1de889551b\") " pod="openshift-dns/dns-default-9pllm" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079756 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s28cc\" (UniqueName: \"kubernetes.io/projected/c3f548c3-cff0-49a6-a800-78732bc54c37-kube-api-access-s28cc\") pod \"packageserver-d55dfcdfc-n8vrv\" (UID: \"c3f548c3-cff0-49a6-a800-78732bc54c37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079798 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85zxv\" (UniqueName: \"kubernetes.io/projected/738f7ea4-3ce0-44b3-8757-1ad261e59de3-kube-api-access-85zxv\") pod \"multus-admission-controller-857f4d67dd-b7j29\" (UID: \"738f7ea4-3ce0-44b3-8757-1ad261e59de3\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-b7j29" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079817 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11fe188c-97f7-4e0c-a84f-d95edc5f5404-config\") pod \"service-ca-operator-777779d784-4hrg2\" (UID: \"11fe188c-97f7-4e0c-a84f-d95edc5f5404\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079837 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee40a1b4-967e-40aa-b6c0-eaf211346941-config-volume\") pod \"collect-profiles-29460825-hv5kn\" (UID: \"ee40a1b4-967e-40aa-b6c0-eaf211346941\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079863 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-socket-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 
21:53:37.079891 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/df73d562-aee4-4b56-b241-bd31f5c95714-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-llpdj\" (UID: \"df73d562-aee4-4b56-b241-bd31f5c95714\") " pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079910 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tx5p5\" (UniqueName: \"kubernetes.io/projected/ee40a1b4-967e-40aa-b6c0-eaf211346941-kube-api-access-tx5p5\") pod \"collect-profiles-29460825-hv5kn\" (UID: \"ee40a1b4-967e-40aa-b6c0-eaf211346941\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079930 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ab37abef-211d-4733-8756-84bf4c9f9655-profile-collector-cert\") pod \"catalog-operator-68c6474976-dflsj\" (UID: \"ab37abef-211d-4733-8756-84bf4c9f9655\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079950 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/4f960de5-cb6c-4b0f-84fb-969264facb30-certs\") pod \"machine-config-server-rzh7r\" (UID: \"4f960de5-cb6c-4b0f-84fb-969264facb30\") " pod="openshift-machine-config-operator/machine-config-server-rzh7r" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079971 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/df73d562-aee4-4b56-b241-bd31f5c95714-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-llpdj\" (UID: \"df73d562-aee4-4b56-b241-bd31f5c95714\") " pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.079989 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4zjkp\" (UID: \"70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.080009 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/738f7ea4-3ce0-44b3-8757-1ad261e59de3-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-b7j29\" (UID: \"738f7ea4-3ce0-44b3-8757-1ad261e59de3\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-b7j29" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.080029 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-mountpoint-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.080051 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028-signing-cabundle\") pod \"service-ca-9c57cc56f-68wbd\" (UID: \"9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028\") " pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.080067 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a-srv-cert\") pod \"olm-operator-6b444d44fb-4zjkp\" (UID: \"70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.080082 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/c3f548c3-cff0-49a6-a800-78732bc54c37-tmpfs\") pod \"packageserver-d55dfcdfc-n8vrv\" (UID: \"c3f548c3-cff0-49a6-a800-78732bc54c37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.080104 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c3f548c3-cff0-49a6-a800-78732bc54c37-webhook-cert\") pod \"packageserver-d55dfcdfc-n8vrv\" (UID: \"c3f548c3-cff0-49a6-a800-78732bc54c37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.080141 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee40a1b4-967e-40aa-b6c0-eaf211346941-secret-volume\") pod \"collect-profiles-29460825-hv5kn\" (UID: \"ee40a1b4-967e-40aa-b6c0-eaf211346941\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.080159 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-plugins-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.080165 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-csi-data-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.080175 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3b17c4a3-a1fa-491b-ba02-fb1de889551b-config-volume\") pod \"dns-default-9pllm\" (UID: \"3b17c4a3-a1fa-491b-ba02-fb1de889551b\") " pod="openshift-dns/dns-default-9pllm" Jan 05 21:53:37 crc kubenswrapper[4910]: E0105 21:53:37.080539 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:37.580522225 +0000 UTC m=+149.158019895 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.081316 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-registration-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.082018 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-socket-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.082759 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/c3f548c3-cff0-49a6-a800-78732bc54c37-tmpfs\") pod \"packageserver-d55dfcdfc-n8vrv\" (UID: \"c3f548c3-cff0-49a6-a800-78732bc54c37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.083104 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-plugins-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.084147 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3b17c4a3-a1fa-491b-ba02-fb1de889551b-config-volume\") pod \"dns-default-9pllm\" (UID: \"3b17c4a3-a1fa-491b-ba02-fb1de889551b\") " pod="openshift-dns/dns-default-9pllm" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.084195 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/fcd4f024-3377-4bda-8dfd-bced91254447-mountpoint-dir\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.091183 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c3f548c3-cff0-49a6-a800-78732bc54c37-apiservice-cert\") pod \"packageserver-d55dfcdfc-n8vrv\" (UID: \"c3f548c3-cff0-49a6-a800-78732bc54c37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.092947 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/4f960de5-cb6c-4b0f-84fb-969264facb30-node-bootstrap-token\") pod \"machine-config-server-rzh7r\" (UID: \"4f960de5-cb6c-4b0f-84fb-969264facb30\") " 
pod="openshift-machine-config-operator/machine-config-server-rzh7r" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.094074 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028-signing-key\") pod \"service-ca-9c57cc56f-68wbd\" (UID: \"9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028\") " pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.094626 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028-signing-cabundle\") pod \"service-ca-9c57cc56f-68wbd\" (UID: \"9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028\") " pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.094696 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/df73d562-aee4-4b56-b241-bd31f5c95714-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-llpdj\" (UID: \"df73d562-aee4-4b56-b241-bd31f5c95714\") " pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.094806 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11fe188c-97f7-4e0c-a84f-d95edc5f5404-config\") pod \"service-ca-operator-777779d784-4hrg2\" (UID: \"11fe188c-97f7-4e0c-a84f-d95edc5f5404\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.094919 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee40a1b4-967e-40aa-b6c0-eaf211346941-config-volume\") pod \"collect-profiles-29460825-hv5kn\" (UID: \"ee40a1b4-967e-40aa-b6c0-eaf211346941\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.095610 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ab37abef-211d-4733-8756-84bf4c9f9655-profile-collector-cert\") pod \"catalog-operator-68c6474976-dflsj\" (UID: \"ab37abef-211d-4733-8756-84bf4c9f9655\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.095756 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee40a1b4-967e-40aa-b6c0-eaf211346941-secret-volume\") pod \"collect-profiles-29460825-hv5kn\" (UID: \"ee40a1b4-967e-40aa-b6c0-eaf211346941\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.096261 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/11fe188c-97f7-4e0c-a84f-d95edc5f5404-serving-cert\") pod \"service-ca-operator-777779d784-4hrg2\" (UID: \"11fe188c-97f7-4e0c-a84f-d95edc5f5404\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.098033 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a-srv-cert\") pod \"olm-operator-6b444d44fb-4zjkp\" (UID: \"70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.098939 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8f0966dc-62e7-4ee6-827c-f3435f1d5d9b-cert\") pod \"ingress-canary-4qps7\" (UID: \"8f0966dc-62e7-4ee6-827c-f3435f1d5d9b\") " pod="openshift-ingress-canary/ingress-canary-4qps7" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.099989 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3b17c4a3-a1fa-491b-ba02-fb1de889551b-metrics-tls\") pod \"dns-default-9pllm\" (UID: \"3b17c4a3-a1fa-491b-ba02-fb1de889551b\") " pod="openshift-dns/dns-default-9pllm" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.100334 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4zjkp\" (UID: \"70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.100754 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/df73d562-aee4-4b56-b241-bd31f5c95714-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-llpdj\" (UID: \"df73d562-aee4-4b56-b241-bd31f5c95714\") " pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.101565 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ab37abef-211d-4733-8756-84bf4c9f9655-srv-cert\") pod \"catalog-operator-68c6474976-dflsj\" (UID: \"ab37abef-211d-4733-8756-84bf4c9f9655\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.102357 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/738f7ea4-3ce0-44b3-8757-1ad261e59de3-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-b7j29\" (UID: \"738f7ea4-3ce0-44b3-8757-1ad261e59de3\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-b7j29" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.110761 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/4f960de5-cb6c-4b0f-84fb-969264facb30-certs\") pod \"machine-config-server-rzh7r\" (UID: \"4f960de5-cb6c-4b0f-84fb-969264facb30\") " pod="openshift-machine-config-operator/machine-config-server-rzh7r" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.118363 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nn99r\" (UniqueName: \"kubernetes.io/projected/9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028-kube-api-access-nn99r\") pod \"service-ca-9c57cc56f-68wbd\" (UID: \"9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028\") " pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.121973 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c3f548c3-cff0-49a6-a800-78732bc54c37-webhook-cert\") pod \"packageserver-d55dfcdfc-n8vrv\" (UID: \"c3f548c3-cff0-49a6-a800-78732bc54c37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.137512 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9l2wj\" (UniqueName: \"kubernetes.io/projected/11fe188c-97f7-4e0c-a84f-d95edc5f5404-kube-api-access-9l2wj\") pod \"service-ca-operator-777779d784-4hrg2\" (UID: \"11fe188c-97f7-4e0c-a84f-d95edc5f5404\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.151003 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g"] Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.155748 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2n7t\" (UniqueName: \"kubernetes.io/projected/ab37abef-211d-4733-8756-84bf4c9f9655-kube-api-access-w2n7t\") pod \"catalog-operator-68c6474976-dflsj\" (UID: \"ab37abef-211d-4733-8756-84bf4c9f9655\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.171832 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.181447 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:37 crc kubenswrapper[4910]: E0105 21:53:37.181968 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:37.681948302 +0000 UTC m=+149.259445962 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.183078 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cqtx\" (UniqueName: \"kubernetes.io/projected/fcd4f024-3377-4bda-8dfd-bced91254447-kube-api-access-8cqtx\") pod \"csi-hostpathplugin-4lmhx\" (UID: \"fcd4f024-3377-4bda-8dfd-bced91254447\") " pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.201732 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85zxv\" (UniqueName: \"kubernetes.io/projected/738f7ea4-3ce0-44b3-8757-1ad261e59de3-kube-api-access-85zxv\") pod \"multus-admission-controller-857f4d67dd-b7j29\" (UID: \"738f7ea4-3ce0-44b3-8757-1ad261e59de3\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-b7j29" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.213890 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-sfnhj"] Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.217488 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-j9xtz"] Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.233751 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dc5zf\" (UniqueName: \"kubernetes.io/projected/df73d562-aee4-4b56-b241-bd31f5c95714-kube-api-access-dc5zf\") pod \"marketplace-operator-79b997595-llpdj\" (UID: \"df73d562-aee4-4b56-b241-bd31f5c95714\") " pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.249953 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-pz96p"] Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.278012 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.284678 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:37 crc kubenswrapper[4910]: E0105 21:53:37.286038 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:37.785990019 +0000 UTC m=+149.363487689 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.291622 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7"] Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.297389 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdbc5\" (UniqueName: \"kubernetes.io/projected/70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a-kube-api-access-zdbc5\") pod \"olm-operator-6b444d44fb-4zjkp\" (UID: \"70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.305761 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.307151 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-k2d98"] Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.308705 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-b7j29" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.317684 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.326553 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.330492 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-6d5lf"] Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.331035 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65779\" (UniqueName: \"kubernetes.io/projected/3b17c4a3-a1fa-491b-ba02-fb1de889551b-kube-api-access-65779\") pod \"dns-default-9pllm\" (UID: \"3b17c4a3-a1fa-491b-ba02-fb1de889551b\") " pod="openshift-dns/dns-default-9pllm" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.339991 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kj64f\" (UniqueName: \"kubernetes.io/projected/8f0966dc-62e7-4ee6-827c-f3435f1d5d9b-kube-api-access-kj64f\") pod \"ingress-canary-4qps7\" (UID: \"8f0966dc-62e7-4ee6-827c-f3435f1d5d9b\") " pod="openshift-ingress-canary/ingress-canary-4qps7" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.343269 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndzlm\" (UniqueName: \"kubernetes.io/projected/4f960de5-cb6c-4b0f-84fb-969264facb30-kube-api-access-ndzlm\") pod \"machine-config-server-rzh7r\" (UID: \"4f960de5-cb6c-4b0f-84fb-969264facb30\") " pod="openshift-machine-config-operator/machine-config-server-rzh7r" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.345898 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tx5p5\" (UniqueName: \"kubernetes.io/projected/ee40a1b4-967e-40aa-b6c0-eaf211346941-kube-api-access-tx5p5\") pod \"collect-profiles-29460825-hv5kn\" (UID: \"ee40a1b4-967e-40aa-b6c0-eaf211346941\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.348388 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.365734 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.366852 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s28cc\" (UniqueName: \"kubernetes.io/projected/c3f548c3-cff0-49a6-a800-78732bc54c37-kube-api-access-s28cc\") pod \"packageserver-d55dfcdfc-n8vrv\" (UID: \"c3f548c3-cff0-49a6-a800-78732bc54c37\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.374536 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.382257 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-9pllm" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.388560 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:37 crc kubenswrapper[4910]: E0105 21:53:37.388710 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:37.888681534 +0000 UTC m=+149.466179204 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.389342 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:37 crc kubenswrapper[4910]: E0105 21:53:37.391718 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:37.891699786 +0000 UTC m=+149.469197456 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.395646 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-4qps7" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.404604 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-rzh7r" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.493931 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:37 crc kubenswrapper[4910]: E0105 21:53:37.494271 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:37.994248877 +0000 UTC m=+149.571746547 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.603968 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:37 crc kubenswrapper[4910]: E0105 21:53:37.604497 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:38.104478824 +0000 UTC m=+149.681976494 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.624897 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg"] Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.634419 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" event={"ID":"00fa008c-8f60-4ec4-ba95-a58e71658276","Type":"ContainerStarted","Data":"b7771b6d17bcae994eb6481a69a390b56fc03daa96f2923a7f30b06015f86d64"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.634944 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.639191 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" event={"ID":"12041e15-b6da-4d62-b434-ceb0e39480a6","Type":"ContainerStarted","Data":"7f34147a70a64568bdcc9e79d7f90796ff76d161d3e82e067b9de3ddee9eb048"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.641625 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.655271 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-fcms5" event={"ID":"3b0afa0a-d1fe-4c63-a25f-3fd39b954817","Type":"ContainerStarted","Data":"077522c5d5acf21c44376e57c9e5ce87a587ccf37dbbf9a957bec5294e5a79bd"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.677295 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" event={"ID":"b9194562-89b4-49cc-b0d2-7875fd2640d8","Type":"ContainerStarted","Data":"87d7684d7a49a800c166798f456eff580b52373ec7e46b2cabeac4ccda287cc9"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.690094 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" event={"ID":"f869ba01-9cc5-403c-a234-7a6e4864c8fb","Type":"ContainerStarted","Data":"d60e7239fccd4b14e2bdf0aaf7ec48b4cedc6353b047502aace196e9f76c5392"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.690178 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.723505 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.725064 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:37 crc kubenswrapper[4910]: E0105 21:53:37.725465 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:38.225442859 +0000 UTC m=+149.802940529 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.744482 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl" event={"ID":"42cd7d43-1bdf-4961-bcae-6f638a83b8e0","Type":"ContainerStarted","Data":"40a19afadd9f033d09dd22a9896464678396a612f5ab0e24d598f951325a56d1"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.751755 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-g5xxj" event={"ID":"8cef9cdb-d8f8-406b-8575-6a6d1b72a638","Type":"ContainerStarted","Data":"b74bab86f9cf34833f34affea1f044a4bfa513efc09a52b967914d96933b751c"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.751815 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-g5xxj" event={"ID":"8cef9cdb-d8f8-406b-8575-6a6d1b72a638","Type":"ContainerStarted","Data":"9aad3e13781f0506c1503b0336bb38868f3c6f8405d163e0b2c55e5c947968b6"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.763016 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-f56kd" event={"ID":"9f9b9ae8-cbde-41e1-b829-c1511c7f2973","Type":"ContainerStarted","Data":"3621c16605253dd9e2c60811fe002020798c8dc6a1fa413bb0ecccddb77814d3"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.767757 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" event={"ID":"aa805313-499f-47e9-8ffa-827fb2664a71","Type":"ContainerStarted","Data":"11f3001fe15eacfac46a955935960d39350713a51a8a037908c80711de6c0e5f"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.768929 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.775820 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" event={"ID":"37a900bd-079a-4b57-a7e6-e12a71e50d2f","Type":"ContainerStarted","Data":"101296c6d0d347a2e74eb683900115dc97fd07181a72c2e42cdb5a234d73491a"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.775872 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" event={"ID":"37a900bd-079a-4b57-a7e6-e12a71e50d2f","Type":"ContainerStarted","Data":"c5e3bce11268425e9a16bdb6fc3e521c3ed9707f9d6ad5f0d00d15fbb50e71ac"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.778271 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" event={"ID":"d856811b-12c9-4b55-bf0d-3da687639b65","Type":"ContainerStarted","Data":"782d73c8ab917ec1f02ebc1495e14c0f68794bb1341639ee77f9fb5bf18a0c1e"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.778323 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" 
event={"ID":"d856811b-12c9-4b55-bf0d-3da687639b65","Type":"ContainerStarted","Data":"4fa206c7fa4468ec41bed950df9abe1a449db54ff133ed0ae3afd86ef02a4f55"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.784303 4910 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-gqzj7 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" start-of-body= Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.784353 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" podUID="aa805313-499f-47e9-8ffa-827fb2664a71" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.786188 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" event={"ID":"69e2d768-6b62-446e-a239-4b221ba0a979","Type":"ContainerStarted","Data":"48be7083d499487cea19d82570c936c6794cffbdfa1cad1470f8d110b35b112a"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.793222 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4" event={"ID":"4f414412-c2c0-4fea-a255-2444675c6f5e","Type":"ContainerStarted","Data":"72c83eac82845eea296ee11c19e05c5bee3a19d315821551e7229e373576d933"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.836185 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:37 crc kubenswrapper[4910]: E0105 21:53:37.845359 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:38.345339171 +0000 UTC m=+149.922836841 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.863469 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2" event={"ID":"c897d56d-7140-4aae-b1df-288502d6c78c","Type":"ContainerStarted","Data":"709286d28f5bf7f761aefdd1b0081ee59edec6981b2e968731b0fedf7f7a4b29"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.866456 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2" event={"ID":"c897d56d-7140-4aae-b1df-288502d6c78c","Type":"ContainerStarted","Data":"6c909789daf5972fe71451211a378fe08cdf62358eea5b045a12597b18a4602c"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.866821 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" event={"ID":"56ef4f14-9acd-41fe-894d-5fbbe990da8e","Type":"ContainerStarted","Data":"a60509060dc475309dac41eb0e1c753c5fae4b03b97e46f95f728d1904ca7204"} Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.920289 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-ck2fz"] Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.938647 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:37 crc kubenswrapper[4910]: E0105 21:53:37.942097 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:38.442055974 +0000 UTC m=+150.019553644 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:37 crc kubenswrapper[4910]: I0105 21:53:37.943221 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:37 crc kubenswrapper[4910]: E0105 21:53:37.943638 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:38.443614571 +0000 UTC m=+150.021112251 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.050368 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:38 crc kubenswrapper[4910]: E0105 21:53:38.050697 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:38.55066481 +0000 UTC m=+150.128162480 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.051222 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:38 crc kubenswrapper[4910]: E0105 21:53:38.055867 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:38.555846719 +0000 UTC m=+150.133344389 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.070004 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-lj4dw"] Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.081761 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl"] Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.097422 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr"] Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.163575 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:38 crc kubenswrapper[4910]: E0105 21:53:38.165537 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:38.665477687 +0000 UTC m=+150.242975377 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.281809 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:38 crc kubenswrapper[4910]: E0105 21:53:38.282333 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:38.782318206 +0000 UTC m=+150.359815876 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.379944 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn"] Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.383272 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:38 crc kubenswrapper[4910]: E0105 21:53:38.394369 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:38.894321796 +0000 UTC m=+150.471819476 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.394885 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:38 crc kubenswrapper[4910]: E0105 21:53:38.395454 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:38.89543768 +0000 UTC m=+150.472935340 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.498467 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:38 crc kubenswrapper[4910]: E0105 21:53:38.498980 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:38.998957951 +0000 UTC m=+150.576455621 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.534418 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-llpdj"] Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.534479 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd"] Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.534491 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-w9k7n"] Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.554038 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb"] Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.555936 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp"] Jan 05 21:53:38 crc kubenswrapper[4910]: W0105 21:53:38.599262 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod99fe5999_eb63_4256_885a_11e4e2023e30.slice/crio-f0ba8dccded735ae9723370506d6c7de09f35c69ee6d6f0fe1deefec7f65b358 WatchSource:0}: Error finding container f0ba8dccded735ae9723370506d6c7de09f35c69ee6d6f0fe1deefec7f65b358: Status 404 returned error can't find the container with id f0ba8dccded735ae9723370506d6c7de09f35c69ee6d6f0fe1deefec7f65b358 Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.600805 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:38 crc kubenswrapper[4910]: E0105 21:53:38.601201 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:39.101181632 +0000 UTC m=+150.678679302 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.617525 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2"] Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.659437 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj"] Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.701971 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:38 crc kubenswrapper[4910]: E0105 21:53:38.702415 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:39.202387732 +0000 UTC m=+150.779885402 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.804474 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:38 crc kubenswrapper[4910]: E0105 21:53:38.805000 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:39.304983334 +0000 UTC m=+150.882481004 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.891764 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp"] Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.909049 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:38 crc kubenswrapper[4910]: E0105 21:53:38.909544 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:39.409495765 +0000 UTC m=+150.986993435 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.909816 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:38 crc kubenswrapper[4910]: E0105 21:53:38.912386 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:39.412350723 +0000 UTC m=+150.989848383 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.947704 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4" event={"ID":"4f414412-c2c0-4fea-a255-2444675c6f5e","Type":"ContainerStarted","Data":"d5f30b9efc47031175df0f5ed1b070067a299cc1f66713787e7a08c7dbbcbfad"} Jan 05 21:53:38 crc kubenswrapper[4910]: W0105 21:53:38.977798 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab37abef_211d_4733_8756_84bf4c9f9655.slice/crio-c9204825c94f3b0867dfcdc657c1f6c66fa9345733d0e84ef98c43cc5e1570e3 WatchSource:0}: Error finding container c9204825c94f3b0867dfcdc657c1f6c66fa9345733d0e84ef98c43cc5e1570e3: Status 404 returned error can't find the container with id c9204825c94f3b0867dfcdc657c1f6c66fa9345733d0e84ef98c43cc5e1570e3 Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.981811 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-fcms5" event={"ID":"3b0afa0a-d1fe-4c63-a25f-3fd39b954817","Type":"ContainerStarted","Data":"10ec03469653ee87ecb80b5a870e4d8dd5b52e23f2efd0463e6d970fb830306b"} Jan 05 21:53:38 crc kubenswrapper[4910]: W0105 21:53:38.982243 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d128f8c_6ea3_4ba0_96bc_8fcd5aac98bf.slice/crio-f1e0862d3f21d547906b24d873ecc9b67b5a2582c17e325d25eddf69c614e6c4 WatchSource:0}: Error finding container f1e0862d3f21d547906b24d873ecc9b67b5a2582c17e325d25eddf69c614e6c4: Status 404 returned error can't find the container with id f1e0862d3f21d547906b24d873ecc9b67b5a2582c17e325d25eddf69c614e6c4 Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.992942 4910 generic.go:334] "Generic (PLEG): container finished" podID="b9194562-89b4-49cc-b0d2-7875fd2640d8" containerID="7e91a2d13cf6b4188ceea81c9cdb3b7ebd2e006975aac5375a95c4932949895c" exitCode=0 Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.993416 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" event={"ID":"b9194562-89b4-49cc-b0d2-7875fd2640d8","Type":"ContainerDied","Data":"7e91a2d13cf6b4188ceea81c9cdb3b7ebd2e006975aac5375a95c4932949895c"} Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.998595 4910 generic.go:334] "Generic (PLEG): container finished" podID="9f9b9ae8-cbde-41e1-b829-c1511c7f2973" containerID="18b812bf42c1e539d8443cb6dea458115c8fa11e3f65270e5a46c991846f6ca8" exitCode=0 Jan 05 21:53:38 crc kubenswrapper[4910]: I0105 21:53:38.998662 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-f56kd" event={"ID":"9f9b9ae8-cbde-41e1-b829-c1511c7f2973","Type":"ContainerDied","Data":"18b812bf42c1e539d8443cb6dea458115c8fa11e3f65270e5a46c991846f6ca8"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.011345 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:39 crc kubenswrapper[4910]: E0105 21:53:39.012428 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:39.512409558 +0000 UTC m=+151.089907228 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.050758 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" event={"ID":"ed6873d6-2014-4326-bb4f-939fab37b01c","Type":"ContainerStarted","Data":"b2085ce391b95e35b69aa7d16da923e443622cfa5e54efdb039581df9e2fef32"} Jan 05 21:53:39 crc kubenswrapper[4910]: W0105 21:53:39.062761 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod11fe188c_97f7_4e0c_a84f_d95edc5f5404.slice/crio-08a70ca0d2fe9e4fe500252888d501d6fc3426565b13ca575efaa5d74aa33e8b WatchSource:0}: Error finding container 08a70ca0d2fe9e4fe500252888d501d6fc3426565b13ca575efaa5d74aa33e8b: Status 404 returned error can't find the container with id 08a70ca0d2fe9e4fe500252888d501d6fc3426565b13ca575efaa5d74aa33e8b Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.072070 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-68wbd"] Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.072944 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-b7j29"] Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.119518 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.120035 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" event={"ID":"7958e921-b665-41a4-8989-5988e6082b50","Type":"ContainerStarted","Data":"fee91452ea4aabce490b9b1c8298b97a825319006cc2e740f6bcce59da4a7979"} Jan 05 21:53:39 crc kubenswrapper[4910]: E0105 21:53:39.120571 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:39.62055092 +0000 UTC m=+151.198048580 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.154113 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-44vj4" podStartSLOduration=128.154085857 podStartE2EDuration="2m8.154085857s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:39.153615453 +0000 UTC m=+150.731113133" watchObservedRunningTime="2026-01-05 21:53:39.154085857 +0000 UTC m=+150.731583517" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.167448 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" podStartSLOduration=128.167429206 podStartE2EDuration="2m8.167429206s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:39.119404835 +0000 UTC m=+150.696902525" watchObservedRunningTime="2026-01-05 21:53:39.167429206 +0000 UTC m=+150.744926876" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.174182 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.193038 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 05 21:53:39 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld Jan 05 21:53:39 crc kubenswrapper[4910]: [+]process-running ok Jan 05 21:53:39 crc kubenswrapper[4910]: healthz check failed Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.193130 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.223811 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:39 crc kubenswrapper[4910]: E0105 21:53:39.224276 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:39.724251356 +0000 UTC m=+151.301749016 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.237173 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-4qps7"] Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.246771 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-4lmhx"] Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.254256 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-w9k7n" event={"ID":"99fe5999-eb63-4256-885a-11e4e2023e30","Type":"ContainerStarted","Data":"f0ba8dccded735ae9723370506d6c7de09f35c69ee6d6f0fe1deefec7f65b358"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.286680 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-k2d98" event={"ID":"b859ccb0-eb52-4086-8db1-cf1543b934d9","Type":"ContainerStarted","Data":"0814c6da94f25caf19ff033676bd1c61e81dfe25799322ed6bddbefffed0c828"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.286744 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-k2d98" event={"ID":"b859ccb0-eb52-4086-8db1-cf1543b934d9","Type":"ContainerStarted","Data":"df51d33b41701df6f88e032a6db0ff885ed32866698f0bb1a2909578b976ad0f"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.287353 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-k2d98" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.310699 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv"] Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.325408 4910 patch_prober.go:28] interesting pod/downloads-7954f5f757-k2d98 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.325520 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k2d98" podUID="b859ccb0-eb52-4086-8db1-cf1543b934d9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.330463 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:39 crc kubenswrapper[4910]: E0105 21:53:39.331468 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-05 21:53:39.831415849 +0000 UTC m=+151.408913519 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.356007 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" event={"ID":"37a900bd-079a-4b57-a7e6-e12a71e50d2f","Type":"ContainerStarted","Data":"315df00c7f6347f14ec99a13b3974f62fe5156e2b5d36718e3e3f4b2f4f7f73b"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.367082 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn"] Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.373074 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-9pllm"] Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.375200 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m9xnz" podStartSLOduration=128.375179049 podStartE2EDuration="2m8.375179049s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:39.365307777 +0000 UTC m=+150.942805447" watchObservedRunningTime="2026-01-05 21:53:39.375179049 +0000 UTC m=+150.952676719" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.408708 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" event={"ID":"9b552109-8c1c-489b-9a57-028d5f24e462","Type":"ContainerStarted","Data":"5b3cca394c39f15dd3dcb5cf4f4bf7c1921f0dae3eb9169065fa537ad039e4f2"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.411363 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-rzh7r" event={"ID":"4f960de5-cb6c-4b0f-84fb-969264facb30","Type":"ContainerStarted","Data":"9e2f452afabc7dce3b5bf5e90f7301b24e81dbb50b66de72c6b36fbc13dd5ce7"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.413209 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-lj4dw" event={"ID":"c14b4107-38b0-488e-a466-34fe6914f075","Type":"ContainerStarted","Data":"3d044162e296a0807878bf0b7463172cdd7940770ddc9379a2cdfe65d3956ff5"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.418632 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-ck2fz" event={"ID":"37186a9e-7601-41d6-9083-c2231119635d","Type":"ContainerStarted","Data":"698af07a53d8dec0294e9b981c484a51771793cdef9c41204b47a3367a266cfe"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.426100 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl" 
event={"ID":"42cd7d43-1bdf-4961-bcae-6f638a83b8e0","Type":"ContainerStarted","Data":"749090cbfcf9ef86975d0b06038fa21b631d825d7a11e3793c0c96cbeb05d542"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.439231 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:39 crc kubenswrapper[4910]: E0105 21:53:39.440224 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:39.940203571 +0000 UTC m=+151.517701241 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.452880 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" event={"ID":"d5c787df-89be-4f93-8fae-b35d9bea1dfa","Type":"ContainerStarted","Data":"d57de10a473cd282d2b87566b015f30d3932f98cde69142508583c54917a1ddf"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.469924 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" event={"ID":"aa805313-499f-47e9-8ffa-827fb2664a71","Type":"ContainerStarted","Data":"5ecee57b82781d0b4c3e3c55bba5a8bbc0addee4eda8d09dfeff805ba544f468"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.485677 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-fcms5" podStartSLOduration=128.485653553 podStartE2EDuration="2m8.485653553s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:39.484015703 +0000 UTC m=+151.061513373" watchObservedRunningTime="2026-01-05 21:53:39.485653553 +0000 UTC m=+151.063151243" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.505307 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.544315 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:39 crc kubenswrapper[4910]: E0105 21:53:39.544763 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-05 21:53:40.044744123 +0000 UTC m=+151.622241803 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.573389 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2" event={"ID":"c897d56d-7140-4aae-b1df-288502d6c78c","Type":"ContainerStarted","Data":"3d533d2f15e6410d8eee122a0e6927274fd0adf76b9d7a06cc51b84c51d964f7"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.587031 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" event={"ID":"55abc4f3-cea0-4cfc-9cb8-49c2be5598c1","Type":"ContainerStarted","Data":"f8c07c2495413f067032d604abd9686a291e851732d118aeba73b46de545d43d"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.600667 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" event={"ID":"d0c9cc0f-1a99-4138-a54b-e33f6ac83988","Type":"ContainerStarted","Data":"de807f18e2adc343b7099e849cee008ada1ef5821acaefac926cd067db4eab4f"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.645232 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:39 crc kubenswrapper[4910]: E0105 21:53:39.646329 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:40.146307024 +0000 UTC m=+151.723804694 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.652426 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" event={"ID":"3526640e-85a9-41f1-b79d-c31854227b25","Type":"ContainerStarted","Data":"9f7e45feabe20d854885afb3313c4038fc584ce41efe98001ced63db1068ccb1"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.674701 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" event={"ID":"5e51587e-3444-440f-802a-347a93a869ad","Type":"ContainerStarted","Data":"560cf05e013e7959ce0a0c0fc1b374e4bc1fc211e23ce3a1c6fabc82f94c849e"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.687079 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-g5xxj" podStartSLOduration=128.687050751 podStartE2EDuration="2m8.687050751s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:39.653245706 +0000 UTC m=+151.230743376" watchObservedRunningTime="2026-01-05 21:53:39.687050751 +0000 UTC m=+151.264548421" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.712474 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" event={"ID":"69e2d768-6b62-446e-a239-4b221ba0a979","Type":"ContainerStarted","Data":"3348600e07318dda31c69edea321f0b86c2c68c72996f165b861a5de4af3df47"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.714115 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.733034 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.736188 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" event={"ID":"df73d562-aee4-4b56-b241-bd31f5c95714","Type":"ContainerStarted","Data":"9df8da22591fa7f0bda034dddf685adb748c74aea7bb7a0625b5cf016904f716"} Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.746975 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:39 crc kubenswrapper[4910]: E0105 21:53:39.750704 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-05 21:53:40.250683991 +0000 UTC m=+151.828181661 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.784364 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" podStartSLOduration=128.784345071 podStartE2EDuration="2m8.784345071s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:39.779530583 +0000 UTC m=+151.357028253" watchObservedRunningTime="2026-01-05 21:53:39.784345071 +0000 UTC m=+151.361842741" Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.847526 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:39 crc kubenswrapper[4910]: E0105 21:53:39.848335 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:40.34831205 +0000 UTC m=+151.925809720 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:39 crc kubenswrapper[4910]: I0105 21:53:39.955705 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:39 crc kubenswrapper[4910]: E0105 21:53:39.956059 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:40.45604451 +0000 UTC m=+152.033542180 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.062599 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:40 crc kubenswrapper[4910]: E0105 21:53:40.068704 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:40.568660029 +0000 UTC m=+152.146157699 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.068877 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:40 crc kubenswrapper[4910]: E0105 21:53:40.071134 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:40.571104374 +0000 UTC m=+152.148602044 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:40 crc kubenswrapper[4910]: E0105 21:53:40.073877 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded6873d6_2014_4326_bb4f_939fab37b01c.slice/crio-5549da56fa39e4e771b52c6c1fe663ccc80edf13a1be608174419559c0804f47.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded6873d6_2014_4326_bb4f_939fab37b01c.slice/crio-conmon-5549da56fa39e4e771b52c6c1fe663ccc80edf13a1be608174419559c0804f47.scope\": RecentStats: unable to find data in memory cache]" Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.183928 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:40 crc kubenswrapper[4910]: E0105 21:53:40.184703 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:40.684667042 +0000 UTC m=+152.262164712 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.196252 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:40 crc kubenswrapper[4910]: E0105 21:53:40.196838 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:40.696817874 +0000 UTC m=+152.274315544 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.206477 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 05 21:53:40 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld Jan 05 21:53:40 crc kubenswrapper[4910]: [+]process-running ok Jan 05 21:53:40 crc kubenswrapper[4910]: healthz check failed Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.206535 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.258697 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2" podStartSLOduration=129.258664069 podStartE2EDuration="2m9.258664069s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:40.203250051 +0000 UTC m=+151.780747711" watchObservedRunningTime="2026-01-05 21:53:40.258664069 +0000 UTC m=+151.836161739" Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.259696 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jh8zt" podStartSLOduration=129.25969056 podStartE2EDuration="2m9.25969056s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:40.246806976 +0000 UTC m=+151.824304646" watchObservedRunningTime="2026-01-05 21:53:40.25969056 +0000 UTC m=+151.837188230" Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.303482 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:40 crc kubenswrapper[4910]: E0105 21:53:40.303691 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:40.803663437 +0000 UTC m=+152.381161107 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.303989 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:40 crc kubenswrapper[4910]: E0105 21:53:40.304366 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:40.804356608 +0000 UTC m=+152.381854278 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.404729 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-k2d98" podStartSLOduration=129.404700372 podStartE2EDuration="2m9.404700372s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:40.290164744 +0000 UTC m=+151.867662414" watchObservedRunningTime="2026-01-05 21:53:40.404700372 +0000 UTC m=+151.982198032" Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.405094 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:40 crc kubenswrapper[4910]: E0105 21:53:40.406057 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:40.906036183 +0000 UTC m=+152.483533853 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.473563 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-47djl" podStartSLOduration=129.47353993 podStartE2EDuration="2m9.47353993s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:40.43858615 +0000 UTC m=+152.016083820" watchObservedRunningTime="2026-01-05 21:53:40.47353993 +0000 UTC m=+152.051037600" Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.507291 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:40 crc kubenswrapper[4910]: E0105 21:53:40.507674 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:41.007657555 +0000 UTC m=+152.585155225 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.519516 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" podStartSLOduration=129.519468967 podStartE2EDuration="2m9.519468967s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:40.484091683 +0000 UTC m=+152.061589353" watchObservedRunningTime="2026-01-05 21:53:40.519468967 +0000 UTC m=+152.096966637" Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.520573 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" podStartSLOduration=128.520562851 podStartE2EDuration="2m8.520562851s" podCreationTimestamp="2026-01-05 21:51:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:40.518462766 +0000 UTC m=+152.095960426" watchObservedRunningTime="2026-01-05 21:53:40.520562851 +0000 UTC m=+152.098060541" Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.609016 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:40 crc kubenswrapper[4910]: E0105 21:53:40.609763 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:41.109738492 +0000 UTC m=+152.687236162 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.715496 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:40 crc kubenswrapper[4910]: E0105 21:53:40.716389 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:41.216364248 +0000 UTC m=+152.793861918 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.812056 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" event={"ID":"e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c","Type":"ContainerStarted","Data":"0e077a58c19955579d0c15b7257efa0b3be0eec2823ef5197578de03d0a9d884"} Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.812099 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" event={"ID":"e7d8a10a-7ee3-4aa6-a30f-e7ca0711c65c","Type":"ContainerStarted","Data":"41913107713dc4ad80da628f12aab765e8ada96a705e92417e374402de3c3e14"} Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.817926 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:40 crc kubenswrapper[4910]: E0105 21:53:40.818335 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:41.31831086 +0000 UTC m=+152.895808520 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.856382 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k6pjp" podStartSLOduration=129.856359926 podStartE2EDuration="2m9.856359926s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:40.853929051 +0000 UTC m=+152.431426721" watchObservedRunningTime="2026-01-05 21:53:40.856359926 +0000 UTC m=+152.433857596" Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.865343 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" event={"ID":"9b552109-8c1c-489b-9a57-028d5f24e462","Type":"ContainerStarted","Data":"ad147a55ef40e92aa306f68dbcb12ab24c812e29bbb753aaa9726d4d68acba02"} Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.865397 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" event={"ID":"9b552109-8c1c-489b-9a57-028d5f24e462","Type":"ContainerStarted","Data":"9a9c84009fb89c6b60d926469cb037e5002cf643a7fe1fcbdd60bf0b73af4d81"} Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.909771 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ktdtg" podStartSLOduration=129.909754991 podStartE2EDuration="2m9.909754991s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:40.906543933 +0000 UTC m=+152.484041603" watchObservedRunningTime="2026-01-05 21:53:40.909754991 +0000 UTC m=+152.487252661" Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.927137 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:40 crc kubenswrapper[4910]: E0105 21:53:40.928715 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:41.428692041 +0000 UTC m=+153.006189911 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.929141 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" event={"ID":"d5c787df-89be-4f93-8fae-b35d9bea1dfa","Type":"ContainerStarted","Data":"504dd6f904dc18cbc314dc9dfdfbb8bd51dfb8fad95816b3f1a6b31aed1c4cb9"} Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.929184 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" event={"ID":"d5c787df-89be-4f93-8fae-b35d9bea1dfa","Type":"ContainerStarted","Data":"840e8d9ccacf2b443b88b90057794a3ebddcd45ece0d21487a2f057dfc2b7955"} Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.948480 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-rzh7r" event={"ID":"4f960de5-cb6c-4b0f-84fb-969264facb30","Type":"ContainerStarted","Data":"7dfff9392a3ced862c14d58f387a508e4bb42d7d3778fc5b09c8af377630740e"} Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.950505 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" event={"ID":"d0c9cc0f-1a99-4138-a54b-e33f6ac83988","Type":"ContainerStarted","Data":"83eb885069dd2cbebb468dcc1548fa55059541866ac76455e3129b1e8dc32409"} Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.956954 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.957003 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.969882 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-zpbbr" podStartSLOduration=129.969861792 podStartE2EDuration="2m9.969861792s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:40.965493778 +0000 UTC m=+152.542991448" watchObservedRunningTime="2026-01-05 21:53:40.969861792 +0000 UTC m=+152.547359452" Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.988446 4910 generic.go:334] "Generic (PLEG): container finished" podID="ed6873d6-2014-4326-bb4f-939fab37b01c" containerID="5549da56fa39e4e771b52c6c1fe663ccc80edf13a1be608174419559c0804f47" exitCode=0 Jan 05 21:53:40 crc kubenswrapper[4910]: I0105 21:53:40.988543 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" event={"ID":"ed6873d6-2014-4326-bb4f-939fab37b01c","Type":"ContainerDied","Data":"5549da56fa39e4e771b52c6c1fe663ccc80edf13a1be608174419559c0804f47"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.022867 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" event={"ID":"12041e15-b6da-4d62-b434-ceb0e39480a6","Type":"ContainerStarted","Data":"30a03be24c005ec8775643ba7a8755e3a178a3cba803eade799700ad927e70c1"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.030046 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:41 crc kubenswrapper[4910]: E0105 21:53:41.030388 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:41.530338985 +0000 UTC m=+153.107836655 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.030618 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:41 crc kubenswrapper[4910]: E0105 21:53:41.031942 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:41.531925083 +0000 UTC m=+153.109422753 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.051181 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" event={"ID":"df73d562-aee4-4b56-b241-bd31f5c95714","Type":"ContainerStarted","Data":"11a17322adb2c5ff1ea5fe398d7f644f21e7b4480df5304859a2db118fe121f8"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.051871 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.056448 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-rzh7r" podStartSLOduration=7.056430394 podStartE2EDuration="7.056430394s" podCreationTimestamp="2026-01-05 21:53:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:41.014077507 +0000 UTC m=+152.591575177" watchObservedRunningTime="2026-01-05 21:53:41.056430394 +0000 UTC m=+152.633928064" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.061602 4910 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-llpdj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.061689 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" podUID="df73d562-aee4-4b56-b241-bd31f5c95714" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.068488 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-j9xtz" event={"ID":"5e51587e-3444-440f-802a-347a93a869ad","Type":"ContainerStarted","Data":"ee5465a0af4a96341cf885f6e10be0de787d5df6dc08d04827925fbf23c02abc"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.083623 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" podStartSLOduration=130.083604096 podStartE2EDuration="2m10.083604096s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:41.082709269 +0000 UTC m=+152.660206939" watchObservedRunningTime="2026-01-05 21:53:41.083604096 +0000 UTC m=+152.661101766" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.086829 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9pllm" 
event={"ID":"3b17c4a3-a1fa-491b-ba02-fb1de889551b","Type":"ContainerStarted","Data":"ae5bc50ccfdfd22e342c72dfd614bbfcb931ae9d0bfea4092db86355e3b5b81b"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.100003 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" event={"ID":"7958e921-b665-41a4-8989-5988e6082b50","Type":"ContainerStarted","Data":"aebf1ac57b0d9791025b09da57bfd682a106f19bd562b271ce5027ebe0e7cce5"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.114318 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-sfnhj" podStartSLOduration=130.114298586 podStartE2EDuration="2m10.114298586s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:41.108479628 +0000 UTC m=+152.685977298" watchObservedRunningTime="2026-01-05 21:53:41.114298586 +0000 UTC m=+152.691796256" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.132004 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.188923 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 21:53:41 crc kubenswrapper[4910]: E0105 21:53:41.191475 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:41.691439959 +0000 UTC m=+153.268937629 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.201198 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 05 21:53:41 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld Jan 05 21:53:41 crc kubenswrapper[4910]: [+]process-running ok Jan 05 21:53:41 crc kubenswrapper[4910]: healthz check failed Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.202369 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.201334 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-f56kd" event={"ID":"9f9b9ae8-cbde-41e1-b829-c1511c7f2973","Type":"ContainerStarted","Data":"37af46eb2f7daec36314f0941530c96df61c684f7ae12f384ef408735da3bbb2"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.223727 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:41 crc kubenswrapper[4910]: E0105 21:53:41.230934 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:41.730913208 +0000 UTC m=+153.308410878 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.257725 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5n2lb" podStartSLOduration=130.257673398 podStartE2EDuration="2m10.257673398s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:41.221329235 +0000 UTC m=+152.798826905" watchObservedRunningTime="2026-01-05 21:53:41.257673398 +0000 UTC m=+152.835171068" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.265697 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" event={"ID":"ab37abef-211d-4733-8756-84bf4c9f9655","Type":"ContainerStarted","Data":"4206cb6a1f8b853a100aa0fa1d5ad637a5cbb8f2c60358fb2f3fc8c3c7a37923"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.265755 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" event={"ID":"ab37abef-211d-4733-8756-84bf4c9f9655","Type":"ContainerStarted","Data":"c9204825c94f3b0867dfcdc657c1f6c66fa9345733d0e84ef98c43cc5e1570e3"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.294949 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.321833 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.326350 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:41 crc kubenswrapper[4910]: E0105 21:53:41.327519 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:41.827495376 +0000 UTC m=+153.404993046 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.347686 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-dflsj" podStartSLOduration=130.347654294 podStartE2EDuration="2m10.347654294s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:41.346652673 +0000 UTC m=+152.924150343" watchObservedRunningTime="2026-01-05 21:53:41.347654294 +0000 UTC m=+152.925151964" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.353435 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-4qps7" event={"ID":"8f0966dc-62e7-4ee6-827c-f3435f1d5d9b","Type":"ContainerStarted","Data":"7b6d92ca30d87b76596809ab96ff1e1200801fee00951e567dc0b58142433884"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.353484 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-4qps7" event={"ID":"8f0966dc-62e7-4ee6-827c-f3435f1d5d9b","Type":"ContainerStarted","Data":"095f0b02a57d1ba03634a31f4ba6a4d598bb4cdff1bce04e282d7f8e59658f91"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.400566 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" event={"ID":"11fe188c-97f7-4e0c-a84f-d95edc5f5404","Type":"ContainerStarted","Data":"08a70ca0d2fe9e4fe500252888d501d6fc3426565b13ca575efaa5d74aa33e8b"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.409466 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-4qps7" podStartSLOduration=7.409442256 podStartE2EDuration="7.409442256s" podCreationTimestamp="2026-01-05 21:53:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:41.398528552 +0000 UTC m=+152.976026222" watchObservedRunningTime="2026-01-05 21:53:41.409442256 +0000 UTC m=+152.986939926" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.430063 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:41 crc kubenswrapper[4910]: E0105 21:53:41.431912 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:41.931889684 +0000 UTC m=+153.509387354 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.447627 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" event={"ID":"56ef4f14-9acd-41fe-894d-5fbbe990da8e","Type":"ContainerStarted","Data":"0ce31dd236a79b76d8443b3c9b478dd875b08ddef840a1af74d6dc49362e5bc4"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.496647 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" event={"ID":"c3f548c3-cff0-49a6-a800-78732bc54c37","Type":"ContainerStarted","Data":"e75edaa4cc210fa920ccf4c2e5e9da01afebf25020bd029170d17afcd6ee759f"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.498471 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.519299 4910 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-n8vrv container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:5443/healthz\": dial tcp 10.217.0.35:5443: connect: connection refused" start-of-body= Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.519368 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" podUID="c3f548c3-cff0-49a6-a800-78732bc54c37" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.35:5443/healthz\": dial tcp 10.217.0.35:5443: connect: connection refused" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.532505 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:41 crc kubenswrapper[4910]: E0105 21:53:41.534395 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:42.034352982 +0000 UTC m=+153.611850652 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.534405 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd" event={"ID":"5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf","Type":"ContainerStarted","Data":"fc4af1e33b0f3d6e1aaceb9b20f1095e630000e2253d9dd727a70d13cde4130e"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.534463 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd" event={"ID":"5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf","Type":"ContainerStarted","Data":"f1e0862d3f21d547906b24d873ecc9b67b5a2582c17e325d25eddf69c614e6c4"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.562237 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" event={"ID":"55abc4f3-cea0-4cfc-9cb8-49c2be5598c1","Type":"ContainerStarted","Data":"826f89f9ad732e18ed2e1397c4e500d5f1668ff790172b1f6f68429cb031650e"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.599683 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" event={"ID":"00fa008c-8f60-4ec4-ba95-a58e71658276","Type":"ContainerStarted","Data":"1a219f7e727fc95617f148171e0d6bf6c132b386c7ce60a8569cb0d649c80ee1"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.623167 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" podStartSLOduration=129.623145652 podStartE2EDuration="2m9.623145652s" podCreationTimestamp="2026-01-05 21:51:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:41.455434925 +0000 UTC m=+153.032932595" watchObservedRunningTime="2026-01-05 21:53:41.623145652 +0000 UTC m=+153.200643322" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.624872 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-4s99g" podStartSLOduration=130.624867545 podStartE2EDuration="2m10.624867545s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:41.623431951 +0000 UTC m=+153.200929621" watchObservedRunningTime="2026-01-05 21:53:41.624867545 +0000 UTC m=+153.202365205" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.629368 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" event={"ID":"70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a","Type":"ContainerStarted","Data":"7c09c1f56e01d68cf3e4ccd3334618fdf6010161b369cafb9d359bbe77f2292f"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.629443 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" event={"ID":"70202c0f-c1cb-4f24-a5ed-c8eaa0cddf2a","Type":"ContainerStarted","Data":"818d24237c7b3e28d5bd9a07ff91a99ef86b01b0c4589ef08a1f3304b4d4893b"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.630877 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.633844 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:41 crc kubenswrapper[4910]: E0105 21:53:41.635567 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:42.135552722 +0000 UTC m=+153.713050392 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.654540 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-lj4dw" event={"ID":"c14b4107-38b0-488e-a466-34fe6914f075","Type":"ContainerStarted","Data":"fbc0c3008b59b61ecf0d50c79d894eede258f34e087d7c8857add4cc50acd36d"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.674440 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" event={"ID":"9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028","Type":"ContainerStarted","Data":"0e403b3d39cfb477e61aa6669688dfa9b787c9ab81a241665d28e502a0914f90"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.674502 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" event={"ID":"9eb7ed0e-62c4-4cc2-b1b5-bfe1d5e38028","Type":"ContainerStarted","Data":"c52962d6f4135b8d27f082e5c8666b5c7e3da4d2ee500ba4ae8fb8d14beed404"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.679624 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.684055 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-w9k7n" event={"ID":"99fe5999-eb63-4256-885a-11e4e2023e30","Type":"ContainerStarted","Data":"8338f902c812b44a5b0b7067b21709c2038a86d467fbcafb7d02e127fcb7b08f"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.733472 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-ck2fz" 
event={"ID":"37186a9e-7601-41d6-9083-c2231119635d","Type":"ContainerStarted","Data":"ae4b3a2254ffee4b7bfb26fbe034dbcd3c804d060084b2f76e3dc74e3da22184"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.737499 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.737929 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:41 crc kubenswrapper[4910]: E0105 21:53:41.739906 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:42.239882397 +0000 UTC m=+153.817380067 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.764040 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" event={"ID":"fcd4f024-3377-4bda-8dfd-bced91254447","Type":"ContainerStarted","Data":"5e0271f006b89ea7328c253ae9fc97180a078bd00d1723470c57e06e432574bb"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.786859 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" event={"ID":"3526640e-85a9-41f1-b79d-c31854227b25","Type":"ContainerStarted","Data":"232c1951579de81bac5b2d3809fba6fc63ed41156579d883da2a2c52faee5890"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.786912 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" event={"ID":"3526640e-85a9-41f1-b79d-c31854227b25","Type":"ContainerStarted","Data":"c7385fcab6b4ac16373fd9fae047a20f4e787a6b1109be45231bbd16593ed31f"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.823339 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" event={"ID":"ee40a1b4-967e-40aa-b6c0-eaf211346941","Type":"ContainerStarted","Data":"39fcb23d1bc88255d10b9dad6c2f83831fbf190d1312d6790dbbbbd18e55396e"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.839224 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:41 crc kubenswrapper[4910]: E0105 21:53:41.840665 4910 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:42.340645124 +0000 UTC m=+153.918142794 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.852523 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-b7j29" event={"ID":"738f7ea4-3ce0-44b3-8757-1ad261e59de3","Type":"ContainerStarted","Data":"a484ea59de7fd64af718b846ccebaace1ffc43427e7c5bfd14d36e0abf30b8af"} Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.852611 4910 patch_prober.go:28] interesting pod/downloads-7954f5f757-k2d98 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.852675 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k2d98" podUID="b859ccb0-eb52-4086-8db1-cf1543b934d9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.900104 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" podStartSLOduration=130.900085624 podStartE2EDuration="2m10.900085624s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:41.898501976 +0000 UTC m=+153.475999646" watchObservedRunningTime="2026-01-05 21:53:41.900085624 +0000 UTC m=+153.477583294" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.900551 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-cbfc7" podStartSLOduration=130.900545318 podStartE2EDuration="2m10.900545318s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:41.779666606 +0000 UTC m=+153.357164276" watchObservedRunningTime="2026-01-05 21:53:41.900545318 +0000 UTC m=+153.478042988" Jan 05 21:53:41 crc kubenswrapper[4910]: I0105 21:53:41.941790 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:41 crc kubenswrapper[4910]: E0105 21:53:41.942900 4910 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:42.442865885 +0000 UTC m=+154.020363555 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.044364 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:42 crc kubenswrapper[4910]: E0105 21:53:42.047100 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:42.547085497 +0000 UTC m=+154.124583167 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.075162 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" podStartSLOduration=131.075142036 podStartE2EDuration="2m11.075142036s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:42.029151257 +0000 UTC m=+153.606648927" watchObservedRunningTime="2026-01-05 21:53:42.075142036 +0000 UTC m=+153.652639706" Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.075569 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9s4dd" podStartSLOduration=131.075564439 podStartE2EDuration="2m11.075564439s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:42.073753254 +0000 UTC m=+153.651250944" watchObservedRunningTime="2026-01-05 21:53:42.075564439 +0000 UTC m=+153.653062109" Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.145930 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:42 crc kubenswrapper[4910]: E0105 21:53:42.146191 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:42.646149711 +0000 UTC m=+154.223647381 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.146283 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:42 crc kubenswrapper[4910]: E0105 21:53:42.146611 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:42.646601135 +0000 UTC m=+154.224098805 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.184521 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 05 21:53:42 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld Jan 05 21:53:42 crc kubenswrapper[4910]: [+]process-running ok Jan 05 21:53:42 crc kubenswrapper[4910]: healthz check failed Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.184615 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.236224 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" podStartSLOduration=131.236200279 podStartE2EDuration="2m11.236200279s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:42.235014623 +0000 UTC m=+153.812512323" 
watchObservedRunningTime="2026-01-05 21:53:42.236200279 +0000 UTC m=+153.813697949" Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.247435 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:42 crc kubenswrapper[4910]: E0105 21:53:42.247670 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:42.747627309 +0000 UTC m=+154.325124989 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.247834 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:42 crc kubenswrapper[4910]: E0105 21:53:42.248195 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:42.748179726 +0000 UTC m=+154.325677386 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.349101 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:42 crc kubenswrapper[4910]: E0105 21:53:42.349309 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:42.849273763 +0000 UTC m=+154.426771433 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.350018 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:42 crc kubenswrapper[4910]: E0105 21:53:42.350459 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:42.850449829 +0000 UTC m=+154.427947499 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.363165 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-ck2fz" podStartSLOduration=131.363142487 podStartE2EDuration="2m11.363142487s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:42.361399404 +0000 UTC m=+153.938897074" watchObservedRunningTime="2026-01-05 21:53:42.363142487 +0000 UTC m=+153.940640157" Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.363284 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" podStartSLOduration=131.363278952 podStartE2EDuration="2m11.363278952s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:42.30284262 +0000 UTC m=+153.880340290" watchObservedRunningTime="2026-01-05 21:53:42.363278952 +0000 UTC m=+153.940776622" Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.402953 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-ck2fz" Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.408891 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-6d5lf" podStartSLOduration=131.408868518 podStartE2EDuration="2m11.408868518s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-05 21:53:42.406519206 +0000 UTC m=+153.984016876" watchObservedRunningTime="2026-01-05 21:53:42.408868518 +0000 UTC m=+153.986366188" Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.451631 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:42 crc kubenswrapper[4910]: E0105 21:53:42.451987 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:42.951966218 +0000 UTC m=+154.529463888 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.518362 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-lj4dw" podStartSLOduration=131.518334731 podStartE2EDuration="2m11.518334731s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:42.476377936 +0000 UTC m=+154.053875606" watchObservedRunningTime="2026-01-05 21:53:42.518334731 +0000 UTC m=+154.095832401" Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.519788 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zjkp" podStartSLOduration=131.519781185 podStartE2EDuration="2m11.519781185s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:42.515818394 +0000 UTC m=+154.093316064" watchObservedRunningTime="2026-01-05 21:53:42.519781185 +0000 UTC m=+154.097278855" Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.553411 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:42 crc kubenswrapper[4910]: E0105 21:53:42.556955 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:43.056923853 +0000 UTC m=+154.634421523 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.575354 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-68wbd" podStartSLOduration=130.575328037 podStartE2EDuration="2m10.575328037s" podCreationTimestamp="2026-01-05 21:51:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:42.559304956 +0000 UTC m=+154.136802626" watchObservedRunningTime="2026-01-05 21:53:42.575328037 +0000 UTC m=+154.152825707" Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.658898 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:42 crc kubenswrapper[4910]: E0105 21:53:42.659252 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:43.159229626 +0000 UTC m=+154.736727296 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.761030 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:53:42 crc kubenswrapper[4910]: E0105 21:53:42.761423 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:43.261394696 +0000 UTC m=+154.838892366 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.862304 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 05 21:53:42 crc kubenswrapper[4910]: E0105 21:53:42.862726 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:43.362704059 +0000 UTC m=+154.940201729 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.887852 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" event={"ID":"d0c9cc0f-1a99-4138-a54b-e33f6ac83988","Type":"ContainerStarted","Data":"7e1791aee2b477b15534a857ac15d55ae44099c680382463e62bd13119d8326b"} Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.887942 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.894475 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8ncwl" event={"ID":"55abc4f3-cea0-4cfc-9cb8-49c2be5598c1","Type":"ContainerStarted","Data":"f7aeeb9a621c172b9680dee94b50ab77bc0831af226f09793b93d2024dc34e81"} Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.899949 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-w9k7n" event={"ID":"99fe5999-eb63-4256-885a-11e4e2023e30","Type":"ContainerStarted","Data":"e838922bd88b0c050d7b25ccf34f31ea7ca726e28c916bff5cca7178f66ce2f6"} Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.902747 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" event={"ID":"ed6873d6-2014-4326-bb4f-939fab37b01c","Type":"ContainerStarted","Data":"489f5b1d0dc405afdcd021e1b1e9e770d7c81c18acd0539ec312c92e42ff8ccf"} Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.902938 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 
21:53:42.904328 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv" event={"ID":"c3f548c3-cff0-49a6-a800-78732bc54c37","Type":"ContainerStarted","Data":"82734d01711eec80b00854da7980227f052c67cf5357fd9998048d8ddcef9c9e"}
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.907384 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4hrg2" event={"ID":"11fe188c-97f7-4e0c-a84f-d95edc5f5404","Type":"ContainerStarted","Data":"e304953e3a6c176ba9c7966c03d32ed897a26988e8a9aae09c3dafcc92140132"}
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.909544 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-lj4dw" event={"ID":"c14b4107-38b0-488e-a466-34fe6914f075","Type":"ContainerStarted","Data":"5a60d11e6a5eb266bb18cc63bb23f1bfaabf48892366f75fc8391d8131a91586"}
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.916558 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" podStartSLOduration=131.916526037 podStartE2EDuration="2m11.916526037s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:42.911252496 +0000 UTC m=+154.488750166" watchObservedRunningTime="2026-01-05 21:53:42.916526037 +0000 UTC m=+154.494023707"
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.919804 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-f56kd" event={"ID":"9f9b9ae8-cbde-41e1-b829-c1511c7f2973","Type":"ContainerStarted","Data":"66ddec95f10700799293920add7f5dece7cf30ec8c6556a4fe56ea7509b13f8f"}
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.922598 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9pllm" event={"ID":"3b17c4a3-a1fa-491b-ba02-fb1de889551b","Type":"ContainerStarted","Data":"b85f7294eade3406a536b7d7fdb87a8e983bccbbe567a849e065aad81b234f48"}
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.922652 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9pllm" event={"ID":"3b17c4a3-a1fa-491b-ba02-fb1de889551b","Type":"ContainerStarted","Data":"5623a15d8bc05990019f5cb06f41dbcad2fee47ca931a0fa91bb3b7d6dea97d8"}
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.922695 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-9pllm"
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.928582 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8" event={"ID":"b9194562-89b4-49cc-b0d2-7875fd2640d8","Type":"ContainerStarted","Data":"946ebac67959a8a6ae804a75be21bd5c8d6174bb88967029b556dd85d98ab62e"}
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.933958 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" event={"ID":"ee40a1b4-967e-40aa-b6c0-eaf211346941","Type":"ContainerStarted","Data":"a057eee485ddc3526ec9ba8e9e55caed65d80e741204ef899ff001441b02e82f"}
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.940347 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" event={"ID":"fcd4f024-3377-4bda-8dfd-bced91254447","Type":"ContainerStarted","Data":"c3bab4d4b6954a3d0a96c5bd7d6b0be65fdc554918b71f75389624ceb829abf0"}
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.941405 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p" podStartSLOduration=131.941385199 podStartE2EDuration="2m11.941385199s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:42.940631946 +0000 UTC m=+154.518129606" watchObservedRunningTime="2026-01-05 21:53:42.941385199 +0000 UTC m=+154.518882869"
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.948691 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-b7j29" event={"ID":"738f7ea4-3ce0-44b3-8757-1ad261e59de3","Type":"ContainerStarted","Data":"32cc1269a65ee020e1dc8e7aabe3ca2b9e7ff5d1a12e93b89a35167d9a1f8781"}
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.948740 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-b7j29" event={"ID":"738f7ea4-3ce0-44b3-8757-1ad261e59de3","Type":"ContainerStarted","Data":"8f80563d7112f2b5c5c18ed8f09c5c84b609f5f9338e8b90e15c26b30afb8e2d"}
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.950099 4910 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-llpdj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body=
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.950156 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" podUID="df73d562-aee4-4b56-b241-bd31f5c95714" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused"
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.964341 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:42 crc kubenswrapper[4910]: E0105 21:53:42.964822 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:43.464801346 +0000 UTC m=+155.042299016 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:42 crc kubenswrapper[4910]: I0105 21:53:42.973962 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-w9k7n" podStartSLOduration=131.973940296 podStartE2EDuration="2m11.973940296s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:42.97114037 +0000 UTC m=+154.548638040" watchObservedRunningTime="2026-01-05 21:53:42.973940296 +0000 UTC m=+154.551437966"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.003006 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-b7j29" podStartSLOduration=132.002986865 podStartE2EDuration="2m12.002986865s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:43.000734907 +0000 UTC m=+154.578232587" watchObservedRunningTime="2026-01-05 21:53:43.002986865 +0000 UTC m=+154.580484535"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.032995 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-9pllm" podStartSLOduration=9.032974364 podStartE2EDuration="9.032974364s" podCreationTimestamp="2026-01-05 21:53:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:43.031292033 +0000 UTC m=+154.608789703" watchObservedRunningTime="2026-01-05 21:53:43.032974364 +0000 UTC m=+154.610472024"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.065493 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 05 21:53:43 crc kubenswrapper[4910]: E0105 21:53:43.066072 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:43.566050237 +0000 UTC m=+155.143547907 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.070667 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:43 crc kubenswrapper[4910]: E0105 21:53:43.071526 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:43.571500544 +0000 UTC m=+155.148998214 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.106853 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-f56kd" podStartSLOduration=132.106829716 podStartE2EDuration="2m12.106829716s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:43.106244828 +0000 UTC m=+154.683742498" watchObservedRunningTime="2026-01-05 21:53:43.106829716 +0000 UTC m=+154.684327386"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.176255 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 05 21:53:43 crc kubenswrapper[4910]: E0105 21:53:43.176627 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:43.676608434 +0000 UTC m=+155.254106104 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.186331 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:43 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:43 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:43 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.186406 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.268369 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2hg8l"]
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.269529 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2hg8l"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.277694 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:43 crc kubenswrapper[4910]: E0105 21:53:43.278280 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:43.778258797 +0000 UTC m=+155.355756467 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.291811 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.379812 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.380221 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsb2f\" (UniqueName: \"kubernetes.io/projected/340fecda-72dc-4870-887a-29b5ef58ae94-kube-api-access-zsb2f\") pod \"certified-operators-2hg8l\" (UID: \"340fecda-72dc-4870-887a-29b5ef58ae94\") " pod="openshift-marketplace/certified-operators-2hg8l"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.380266 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/340fecda-72dc-4870-887a-29b5ef58ae94-catalog-content\") pod \"certified-operators-2hg8l\" (UID: \"340fecda-72dc-4870-887a-29b5ef58ae94\") " pod="openshift-marketplace/certified-operators-2hg8l"
Jan 05 21:53:43 crc kubenswrapper[4910]: E0105 21:53:43.380349 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:43.880302002 +0000 UTC m=+155.457799672 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.380426 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/340fecda-72dc-4870-887a-29b5ef58ae94-utilities\") pod \"certified-operators-2hg8l\" (UID: \"340fecda-72dc-4870-887a-29b5ef58ae94\") " pod="openshift-marketplace/certified-operators-2hg8l"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.388819 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2hg8l"]
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.456479 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7tvk2"]
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.457716 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7tvk2"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.462652 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.475570 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7tvk2"]
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.483449 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsb2f\" (UniqueName: \"kubernetes.io/projected/340fecda-72dc-4870-887a-29b5ef58ae94-kube-api-access-zsb2f\") pod \"certified-operators-2hg8l\" (UID: \"340fecda-72dc-4870-887a-29b5ef58ae94\") " pod="openshift-marketplace/certified-operators-2hg8l"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.483593 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/340fecda-72dc-4870-887a-29b5ef58ae94-catalog-content\") pod \"certified-operators-2hg8l\" (UID: \"340fecda-72dc-4870-887a-29b5ef58ae94\") " pod="openshift-marketplace/certified-operators-2hg8l"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.483663 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/340fecda-72dc-4870-887a-29b5ef58ae94-utilities\") pod \"certified-operators-2hg8l\" (UID: \"340fecda-72dc-4870-887a-29b5ef58ae94\") " pod="openshift-marketplace/certified-operators-2hg8l"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.483707 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:43 crc kubenswrapper[4910]: E0105 21:53:43.484150 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:43.984109791 +0000 UTC m=+155.561607461 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.485396 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/340fecda-72dc-4870-887a-29b5ef58ae94-catalog-content\") pod \"certified-operators-2hg8l\" (UID: \"340fecda-72dc-4870-887a-29b5ef58ae94\") " pod="openshift-marketplace/certified-operators-2hg8l"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.485782 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/340fecda-72dc-4870-887a-29b5ef58ae94-utilities\") pod \"certified-operators-2hg8l\" (UID: \"340fecda-72dc-4870-887a-29b5ef58ae94\") " pod="openshift-marketplace/certified-operators-2hg8l"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.535115 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsb2f\" (UniqueName: \"kubernetes.io/projected/340fecda-72dc-4870-887a-29b5ef58ae94-kube-api-access-zsb2f\") pod \"certified-operators-2hg8l\" (UID: \"340fecda-72dc-4870-887a-29b5ef58ae94\") " pod="openshift-marketplace/certified-operators-2hg8l"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.588498 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.589270 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e67293c9-fc75-468d-b1c5-c09f9ad46dda-utilities\") pod \"community-operators-7tvk2\" (UID: \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\") " pod="openshift-marketplace/community-operators-7tvk2"
Jan 05 21:53:43 crc kubenswrapper[4910]: E0105 21:53:43.589421 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:44.089387806 +0000 UTC m=+155.666885476 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.590131 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e67293c9-fc75-468d-b1c5-c09f9ad46dda-catalog-content\") pod \"community-operators-7tvk2\" (UID: \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\") " pod="openshift-marketplace/community-operators-7tvk2"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.590334 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7xvk\" (UniqueName: \"kubernetes.io/projected/e67293c9-fc75-468d-b1c5-c09f9ad46dda-kube-api-access-z7xvk\") pod \"community-operators-7tvk2\" (UID: \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\") " pod="openshift-marketplace/community-operators-7tvk2"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.590521 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:43 crc kubenswrapper[4910]: E0105 21:53:43.591014 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:44.090997595 +0000 UTC m=+155.668495265 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.600487 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2hg8l"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.643956 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wbhgr"]
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.648900 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wbhgr"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.666162 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wbhgr"]
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.692005 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.692300 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e67293c9-fc75-468d-b1c5-c09f9ad46dda-utilities\") pod \"community-operators-7tvk2\" (UID: \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\") " pod="openshift-marketplace/community-operators-7tvk2"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.692347 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e67293c9-fc75-468d-b1c5-c09f9ad46dda-catalog-content\") pod \"community-operators-7tvk2\" (UID: \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\") " pod="openshift-marketplace/community-operators-7tvk2"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.692379 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7xvk\" (UniqueName: \"kubernetes.io/projected/e67293c9-fc75-468d-b1c5-c09f9ad46dda-kube-api-access-z7xvk\") pod \"community-operators-7tvk2\" (UID: \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\") " pod="openshift-marketplace/community-operators-7tvk2"
Jan 05 21:53:43 crc kubenswrapper[4910]: E0105 21:53:43.693043 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:44.19301893 +0000 UTC m=+155.770516600 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.693604 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e67293c9-fc75-468d-b1c5-c09f9ad46dda-utilities\") pod \"community-operators-7tvk2\" (UID: \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\") " pod="openshift-marketplace/community-operators-7tvk2"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.693916 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e67293c9-fc75-468d-b1c5-c09f9ad46dda-catalog-content\") pod \"community-operators-7tvk2\" (UID: \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\") " pod="openshift-marketplace/community-operators-7tvk2"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.725897 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n8vrv"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.734321 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7xvk\" (UniqueName: \"kubernetes.io/projected/e67293c9-fc75-468d-b1c5-c09f9ad46dda-kube-api-access-z7xvk\") pod \"community-operators-7tvk2\" (UID: \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\") " pod="openshift-marketplace/community-operators-7tvk2"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.786395 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7tvk2"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.796251 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ccnl\" (UniqueName: \"kubernetes.io/projected/e6181ab2-b292-4e7d-b30e-ec724946700c-kube-api-access-8ccnl\") pod \"certified-operators-wbhgr\" (UID: \"e6181ab2-b292-4e7d-b30e-ec724946700c\") " pod="openshift-marketplace/certified-operators-wbhgr"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.796293 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6181ab2-b292-4e7d-b30e-ec724946700c-catalog-content\") pod \"certified-operators-wbhgr\" (UID: \"e6181ab2-b292-4e7d-b30e-ec724946700c\") " pod="openshift-marketplace/certified-operators-wbhgr"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.796316 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6181ab2-b292-4e7d-b30e-ec724946700c-utilities\") pod \"certified-operators-wbhgr\" (UID: \"e6181ab2-b292-4e7d-b30e-ec724946700c\") " pod="openshift-marketplace/certified-operators-wbhgr"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.796381 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:43 crc kubenswrapper[4910]: E0105 21:53:43.796744 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:44.296728986 +0000 UTC m=+155.874226656 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.850090 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gnvct"]
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.853266 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gnvct"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.870106 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gnvct"]
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.897993 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.898161 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-catalog-content\") pod \"community-operators-gnvct\" (UID: \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\") " pod="openshift-marketplace/community-operators-gnvct"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.898194 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-utilities\") pod \"community-operators-gnvct\" (UID: \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\") " pod="openshift-marketplace/community-operators-gnvct"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.898253 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qfsm\" (UniqueName: \"kubernetes.io/projected/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-kube-api-access-4qfsm\") pod \"community-operators-gnvct\" (UID: \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\") " pod="openshift-marketplace/community-operators-gnvct"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.898281 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ccnl\" (UniqueName: \"kubernetes.io/projected/e6181ab2-b292-4e7d-b30e-ec724946700c-kube-api-access-8ccnl\") pod \"certified-operators-wbhgr\" (UID: \"e6181ab2-b292-4e7d-b30e-ec724946700c\") " pod="openshift-marketplace/certified-operators-wbhgr"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.898308 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6181ab2-b292-4e7d-b30e-ec724946700c-catalog-content\") pod \"certified-operators-wbhgr\" (UID: \"e6181ab2-b292-4e7d-b30e-ec724946700c\") " pod="openshift-marketplace/certified-operators-wbhgr"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.898338 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6181ab2-b292-4e7d-b30e-ec724946700c-utilities\") pod \"certified-operators-wbhgr\" (UID: \"e6181ab2-b292-4e7d-b30e-ec724946700c\") " pod="openshift-marketplace/certified-operators-wbhgr"
Jan 05 21:53:43 crc kubenswrapper[4910]: E0105 21:53:43.898933 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:44.398864595 +0000 UTC m=+155.976362265 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.899347 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6181ab2-b292-4e7d-b30e-ec724946700c-catalog-content\") pod \"certified-operators-wbhgr\" (UID: \"e6181ab2-b292-4e7d-b30e-ec724946700c\") " pod="openshift-marketplace/certified-operators-wbhgr"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.898964 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6181ab2-b292-4e7d-b30e-ec724946700c-utilities\") pod \"certified-operators-wbhgr\" (UID: \"e6181ab2-b292-4e7d-b30e-ec724946700c\") " pod="openshift-marketplace/certified-operators-wbhgr"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.926928 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ccnl\" (UniqueName: \"kubernetes.io/projected/e6181ab2-b292-4e7d-b30e-ec724946700c-kube-api-access-8ccnl\") pod \"certified-operators-wbhgr\" (UID: \"e6181ab2-b292-4e7d-b30e-ec724946700c\") " pod="openshift-marketplace/certified-operators-wbhgr"
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.981707 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" event={"ID":"fcd4f024-3377-4bda-8dfd-bced91254447","Type":"ContainerStarted","Data":"3d2e05aa7e8b3a619ca9ec0126bf036d1e7a7044f570e5d1dff8528233979315"}
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.981746 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" event={"ID":"fcd4f024-3377-4bda-8dfd-bced91254447","Type":"ContainerStarted","Data":"051ffc2599fb80616bfb20d487dad77de72a6d1f04c04ec7994d416df87cefe8"}
Jan 05 21:53:43 crc kubenswrapper[4910]: I0105 21:53:43.990554 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj"
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.000655 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.000734 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-catalog-content\") pod \"community-operators-gnvct\" (UID: \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\") " pod="openshift-marketplace/community-operators-gnvct"
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.000802 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-utilities\") pod \"community-operators-gnvct\" (UID: \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\") " pod="openshift-marketplace/community-operators-gnvct"
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.001022 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qfsm\" (UniqueName: \"kubernetes.io/projected/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-kube-api-access-4qfsm\") pod \"community-operators-gnvct\" (UID: \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\") " pod="openshift-marketplace/community-operators-gnvct"
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.005806 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-catalog-content\") pod \"community-operators-gnvct\" (UID: \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\") " pod="openshift-marketplace/community-operators-gnvct"
Jan 05 21:53:44 crc kubenswrapper[4910]: E0105 21:53:44.008177 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:44.508150672 +0000 UTC m=+156.085648512 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.008569 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-utilities\") pod \"community-operators-gnvct\" (UID: \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\") " pod="openshift-marketplace/community-operators-gnvct"
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.030284 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wbhgr"
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.054952 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qfsm\" (UniqueName: \"kubernetes.io/projected/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-kube-api-access-4qfsm\") pod \"community-operators-gnvct\" (UID: \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\") " pod="openshift-marketplace/community-operators-gnvct"
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.101618 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 05 21:53:44 crc kubenswrapper[4910]: E0105 21:53:44.101917 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:44.601893693 +0000 UTC m=+156.179391363 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.178968 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:44 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:44 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:44 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.179069 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.188591 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gnvct"
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.202806 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:44 crc kubenswrapper[4910]: E0105 21:53:44.203458 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:44.703435574 +0000 UTC m=+156.280933244 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.210276 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2hg8l"]
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.287594 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7tvk2"]
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.304528 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 05 21:53:44 crc kubenswrapper[4910]: E0105 21:53:44.304978 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:44.804944333 +0000 UTC m=+156.382442003 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.409274 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:44 crc kubenswrapper[4910]: E0105 21:53:44.409627 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:44.909612729 +0000 UTC m=+156.487110399 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.449920 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wbhgr"]
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.510673 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 05 21:53:44 crc kubenswrapper[4910]: E0105 21:53:44.510992 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:45.010970303 +0000 UTC m=+156.588467973 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.612706 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:44 crc kubenswrapper[4910]: E0105 21:53:44.613060 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:45.11304773 +0000 UTC m=+156.690545400 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.672114 4910 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.713511 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 05 21:53:44 crc kubenswrapper[4910]: E0105 21:53:44.713796 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:45.213774955 +0000 UTC m=+156.791272625 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.759920 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gnvct"]
Jan 05 21:53:44 crc kubenswrapper[4910]: W0105 21:53:44.763835 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod93cdd0f0_6faf_4d13_b090_21afa1ae8f76.slice/crio-1ecd8d2cc3bdb5f5163b1a0b90f5d03d7e66b1f79cc4079b0f293173ef6b1a2b WatchSource:0}: Error finding container 1ecd8d2cc3bdb5f5163b1a0b90f5d03d7e66b1f79cc4079b0f293173ef6b1a2b: Status 404 returned error can't find the container with id 1ecd8d2cc3bdb5f5163b1a0b90f5d03d7e66b1f79cc4079b0f293173ef6b1a2b
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.815956 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:44 crc kubenswrapper[4910]: E0105 21:53:44.816439 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:45.316418799 +0000 UTC m=+156.893916469 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.916536 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 05 21:53:44 crc kubenswrapper[4910]: E0105 21:53:44.916680 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:45.416656089 +0000 UTC m=+156.994153759 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.916837 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:44 crc kubenswrapper[4910]: E0105 21:53:44.917220 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:45.417212666 +0000 UTC m=+156.994710336 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.992879 4910 generic.go:334] "Generic (PLEG): container finished" podID="ee40a1b4-967e-40aa-b6c0-eaf211346941" containerID="a057eee485ddc3526ec9ba8e9e55caed65d80e741204ef899ff001441b02e82f" exitCode=0
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.993012 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" event={"ID":"ee40a1b4-967e-40aa-b6c0-eaf211346941","Type":"ContainerDied","Data":"a057eee485ddc3526ec9ba8e9e55caed65d80e741204ef899ff001441b02e82f"}
Jan 05 21:53:44 crc kubenswrapper[4910]: I0105 21:53:44.999212 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" event={"ID":"fcd4f024-3377-4bda-8dfd-bced91254447","Type":"ContainerStarted","Data":"8fc5b94c4317d6e3f7661add3825524c6a500027075adf52a1b9ae763b73417b"}
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.003974 4910 generic.go:334] "Generic (PLEG): container finished" podID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" containerID="cdbfba9fe0e865807ebc0dbe4bd01a9cb1d24ea84c2043d2cc54f3f70da09b15" exitCode=0
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.004040 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tvk2" event={"ID":"e67293c9-fc75-468d-b1c5-c09f9ad46dda","Type":"ContainerDied","Data":"cdbfba9fe0e865807ebc0dbe4bd01a9cb1d24ea84c2043d2cc54f3f70da09b15"}
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.004112 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tvk2" event={"ID":"e67293c9-fc75-468d-b1c5-c09f9ad46dda","Type":"ContainerStarted","Data":"dcc3a3a2b74af73c57e86fa4a0fcd79b9a9b81245dc52e394ea1827c92cc0991"}
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.008446 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.013047 4910 generic.go:334] "Generic (PLEG): container finished" podID="340fecda-72dc-4870-887a-29b5ef58ae94" containerID="5bb650890246204edae60743a4a55732e01aa7d7a93710aa3ba1556b62515f6a" exitCode=0
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.013338 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hg8l" event={"ID":"340fecda-72dc-4870-887a-29b5ef58ae94","Type":"ContainerDied","Data":"5bb650890246204edae60743a4a55732e01aa7d7a93710aa3ba1556b62515f6a"}
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.013389 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hg8l" event={"ID":"340fecda-72dc-4870-887a-29b5ef58ae94","Type":"ContainerStarted","Data":"081053c12208e240cd041d0dd51d64b96b506715362717423904103744dc15c8"}
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.016450 4910 generic.go:334] "Generic (PLEG): container finished" podID="e6181ab2-b292-4e7d-b30e-ec724946700c" containerID="1e1e03a4588bd995d0066b850743aab6118d35030c133dfc31ade0f9a78e2c5b" exitCode=0
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.016498 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wbhgr" event={"ID":"e6181ab2-b292-4e7d-b30e-ec724946700c","Type":"ContainerDied","Data":"1e1e03a4588bd995d0066b850743aab6118d35030c133dfc31ade0f9a78e2c5b"}
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.016520 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wbhgr" event={"ID":"e6181ab2-b292-4e7d-b30e-ec724946700c","Type":"ContainerStarted","Data":"3233301218f1b7f1b73dda7a82aada9098161b42124c95190d6f9d50f5a0cb52"}
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.017597 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 05 21:53:45 crc kubenswrapper[4910]: E0105 21:53:45.018015 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-05 21:53:45.517879449 +0000 UTC m=+157.095377119 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.018386 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:45 crc kubenswrapper[4910]: E0105 21:53:45.018927 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-05 21:53:45.518906331 +0000 UTC m=+157.096404001 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xg5fl" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.019008 4910 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-05T21:53:44.672181531Z","Handler":null,"Name":""}
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.019162 4910 generic.go:334] "Generic (PLEG): container finished" podID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" containerID="0fc196719dd50a4e78c57b2b67a47231aef6776056e67108568ddc5972cea35a" exitCode=0
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.019280 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gnvct" event={"ID":"93cdd0f0-6faf-4d13-b090-21afa1ae8f76","Type":"ContainerDied","Data":"0fc196719dd50a4e78c57b2b67a47231aef6776056e67108568ddc5972cea35a"}
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.019316 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gnvct" event={"ID":"93cdd0f0-6faf-4d13-b090-21afa1ae8f76","Type":"ContainerStarted","Data":"1ecd8d2cc3bdb5f5163b1a0b90f5d03d7e66b1f79cc4079b0f293173ef6b1a2b"}
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.030721 4910 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.030759 4910 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.043745 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-4lmhx" podStartSLOduration=11.043725341 podStartE2EDuration="11.043725341s" podCreationTimestamp="2026-01-05 21:53:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:45.036340195 +0000 UTC m=+156.613837855" watchObservedRunningTime="2026-01-05 21:53:45.043725341 +0000 UTC m=+156.621223011"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.119970 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.125805 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.177709 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:45 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:45 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:45 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.177778 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.222716 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.234413 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.234478 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.257215 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xg5fl\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.361270 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.450439 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pzbmf"]
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.458681 4910 util.go:30] "No sandbox for pod can be found.
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.465911 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.478849 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzbmf"]
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.531850 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvgsn\" (UniqueName: \"kubernetes.io/projected/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-kube-api-access-mvgsn\") pod \"redhat-marketplace-pzbmf\" (UID: \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\") " pod="openshift-marketplace/redhat-marketplace-pzbmf"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.531910 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-catalog-content\") pod \"redhat-marketplace-pzbmf\" (UID: \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\") " pod="openshift-marketplace/redhat-marketplace-pzbmf"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.532030 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-utilities\") pod \"redhat-marketplace-pzbmf\" (UID: \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\") " pod="openshift-marketplace/redhat-marketplace-pzbmf"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.535892 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pz96p"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.633345 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvgsn\" (UniqueName: \"kubernetes.io/projected/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-kube-api-access-mvgsn\") pod \"redhat-marketplace-pzbmf\" (UID: \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\") " pod="openshift-marketplace/redhat-marketplace-pzbmf"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.633411 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-catalog-content\") pod \"redhat-marketplace-pzbmf\" (UID: \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\") " pod="openshift-marketplace/redhat-marketplace-pzbmf"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.633471 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-utilities\") pod \"redhat-marketplace-pzbmf\" (UID: \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\") " pod="openshift-marketplace/redhat-marketplace-pzbmf"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.634438 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-utilities\") pod \"redhat-marketplace-pzbmf\" (UID: \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\") " pod="openshift-marketplace/redhat-marketplace-pzbmf"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.635389 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-catalog-content\") pod \"redhat-marketplace-pzbmf\" (UID: \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\") " pod="openshift-marketplace/redhat-marketplace-pzbmf"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.652668 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-xg5fl"]
Jan 05 21:53:45 crc kubenswrapper[4910]: W0105 21:53:45.662058 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcb24f5dd_82b6_4a8e_8e86_b639a8435bf8.slice/crio-81ce46da37b8920fc256e98069a7b4e197e0aee4b128ccaaa8b41c707d97279e WatchSource:0}: Error finding container 81ce46da37b8920fc256e98069a7b4e197e0aee4b128ccaaa8b41c707d97279e: Status 404 returned error can't find the container with id 81ce46da37b8920fc256e98069a7b4e197e0aee4b128ccaaa8b41c707d97279e
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.662758 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvgsn\" (UniqueName: \"kubernetes.io/projected/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-kube-api-access-mvgsn\") pod \"redhat-marketplace-pzbmf\" (UID: \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\") " pod="openshift-marketplace/redhat-marketplace-pzbmf"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.842734 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pzbmf"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.851948 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bz5df"]
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.859191 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bz5df"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.882456 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bz5df"]
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.943519 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aef8fca5-e47a-4942-8f59-42731aa77419-catalog-content\") pod \"redhat-marketplace-bz5df\" (UID: \"aef8fca5-e47a-4942-8f59-42731aa77419\") " pod="openshift-marketplace/redhat-marketplace-bz5df"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.943601 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aef8fca5-e47a-4942-8f59-42731aa77419-utilities\") pod \"redhat-marketplace-bz5df\" (UID: \"aef8fca5-e47a-4942-8f59-42731aa77419\") " pod="openshift-marketplace/redhat-marketplace-bz5df"
Jan 05 21:53:45 crc kubenswrapper[4910]: I0105 21:53:45.943677 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gqh6\" (UniqueName: \"kubernetes.io/projected/aef8fca5-e47a-4942-8f59-42731aa77419-kube-api-access-8gqh6\") pod \"redhat-marketplace-bz5df\" (UID: \"aef8fca5-e47a-4942-8f59-42731aa77419\") " pod="openshift-marketplace/redhat-marketplace-bz5df"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.030213 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" event={"ID":"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8","Type":"ContainerStarted","Data":"ceb2190978cd52cf404ea8a77a5ef48bd7dd4a9ff589627d508e196cde33480f"}
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.030264 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" event={"ID":"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8","Type":"ContainerStarted","Data":"81ce46da37b8920fc256e98069a7b4e197e0aee4b128ccaaa8b41c707d97279e"}
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.030471 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.046448 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gqh6\" (UniqueName: \"kubernetes.io/projected/aef8fca5-e47a-4942-8f59-42731aa77419-kube-api-access-8gqh6\") pod \"redhat-marketplace-bz5df\" (UID: \"aef8fca5-e47a-4942-8f59-42731aa77419\") " pod="openshift-marketplace/redhat-marketplace-bz5df"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.046507 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aef8fca5-e47a-4942-8f59-42731aa77419-catalog-content\") pod \"redhat-marketplace-bz5df\" (UID: \"aef8fca5-e47a-4942-8f59-42731aa77419\") " pod="openshift-marketplace/redhat-marketplace-bz5df"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.046555 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aef8fca5-e47a-4942-8f59-42731aa77419-utilities\") pod \"redhat-marketplace-bz5df\" (UID: \"aef8fca5-e47a-4942-8f59-42731aa77419\") " pod="openshift-marketplace/redhat-marketplace-bz5df"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.046972 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aef8fca5-e47a-4942-8f59-42731aa77419-utilities\") pod \"redhat-marketplace-bz5df\" (UID: \"aef8fca5-e47a-4942-8f59-42731aa77419\") " pod="openshift-marketplace/redhat-marketplace-bz5df"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.047439 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aef8fca5-e47a-4942-8f59-42731aa77419-catalog-content\") pod \"redhat-marketplace-bz5df\" (UID: \"aef8fca5-e47a-4942-8f59-42731aa77419\") " pod="openshift-marketplace/redhat-marketplace-bz5df"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.056952 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" podStartSLOduration=135.056931505 podStartE2EDuration="2m15.056931505s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:46.05187194 +0000 UTC m=+157.629369610" watchObservedRunningTime="2026-01-05 21:53:46.056931505 +0000 UTC m=+157.634429175"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.071858 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gqh6\" (UniqueName: \"kubernetes.io/projected/aef8fca5-e47a-4942-8f59-42731aa77419-kube-api-access-8gqh6\") pod \"redhat-marketplace-bz5df\" (UID: \"aef8fca5-e47a-4942-8f59-42731aa77419\") " pod="openshift-marketplace/redhat-marketplace-bz5df"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.074946 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.075716 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.079233 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.079582 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.080249 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.179505 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:46 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:46 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:46 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.179988 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.195302 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bz5df"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.253976 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8f26ac8e-1e9a-4b95-8074-8bc02f7f041d-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.254173 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8f26ac8e-1e9a-4b95-8074-8bc02f7f041d-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.256380 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-g5xxj"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.256788 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-g5xxj"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.275086 4910 patch_prober.go:28] interesting pod/console-f9d7485db-g5xxj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body=
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.275324 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-g5xxj" podUID="8cef9cdb-d8f8-406b-8575-6a6d1b72a638" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.357747 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8f26ac8e-1e9a-4b95-8074-8bc02f7f041d-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.357822 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8f26ac8e-1e9a-4b95-8074-8bc02f7f041d-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.358821 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8f26ac8e-1e9a-4b95-8074-8bc02f7f041d-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.368795 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.368853 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.399517 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.400242 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-f56kd"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.400283 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-f56kd"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.414373 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8f26ac8e-1e9a-4b95-8074-8bc02f7f041d-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.442670 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-f56kd"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.472093 4910 patch_prober.go:28] interesting pod/downloads-7954f5f757-k2d98 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.472179 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k2d98" podUID="b859ccb0-eb52-4086-8db1-cf1543b934d9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.472635 4910 patch_prober.go:28] interesting pod/downloads-7954f5f757-k2d98 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.472653 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-k2d98" podUID="b859ccb0-eb52-4086-8db1-cf1543b934d9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.485408 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ghpct"]
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.488513 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ghpct"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.491619 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.498790 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ghpct"]
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.610818 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzbmf"]
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.636491 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.670866 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tx5p5\" (UniqueName: \"kubernetes.io/projected/ee40a1b4-967e-40aa-b6c0-eaf211346941-kube-api-access-tx5p5\") pod \"ee40a1b4-967e-40aa-b6c0-eaf211346941\" (UID: \"ee40a1b4-967e-40aa-b6c0-eaf211346941\") "
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.670930 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee40a1b4-967e-40aa-b6c0-eaf211346941-config-volume\") pod \"ee40a1b4-967e-40aa-b6c0-eaf211346941\" (UID: \"ee40a1b4-967e-40aa-b6c0-eaf211346941\") "
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.670978 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee40a1b4-967e-40aa-b6c0-eaf211346941-secret-volume\") pod \"ee40a1b4-967e-40aa-b6c0-eaf211346941\" (UID: \"ee40a1b4-967e-40aa-b6c0-eaf211346941\") "
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.671208 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-utilities\") pod \"redhat-operators-ghpct\" (UID: \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\") " pod="openshift-marketplace/redhat-operators-ghpct"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.671253 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-967cg\" (UniqueName: \"kubernetes.io/projected/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-kube-api-access-967cg\") pod \"redhat-operators-ghpct\" (UID: \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\") " pod="openshift-marketplace/redhat-operators-ghpct"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.671289 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-catalog-content\") pod \"redhat-operators-ghpct\" (UID: \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\") " pod="openshift-marketplace/redhat-operators-ghpct"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.674584 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee40a1b4-967e-40aa-b6c0-eaf211346941-config-volume" (OuterVolumeSpecName: "config-volume") pod "ee40a1b4-967e-40aa-b6c0-eaf211346941" (UID: "ee40a1b4-967e-40aa-b6c0-eaf211346941"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.682742 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee40a1b4-967e-40aa-b6c0-eaf211346941-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ee40a1b4-967e-40aa-b6c0-eaf211346941" (UID: "ee40a1b4-967e-40aa-b6c0-eaf211346941"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.684535 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee40a1b4-967e-40aa-b6c0-eaf211346941-kube-api-access-tx5p5" (OuterVolumeSpecName: "kube-api-access-tx5p5") pod "ee40a1b4-967e-40aa-b6c0-eaf211346941" (UID: "ee40a1b4-967e-40aa-b6c0-eaf211346941"). InnerVolumeSpecName "kube-api-access-tx5p5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.696621 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.720843 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bz5df"]
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.752953 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.772102 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-utilities\") pod \"redhat-operators-ghpct\" (UID: \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\") " pod="openshift-marketplace/redhat-operators-ghpct"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.772175 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-967cg\" (UniqueName: \"kubernetes.io/projected/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-kube-api-access-967cg\") pod \"redhat-operators-ghpct\" (UID: \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\") " pod="openshift-marketplace/redhat-operators-ghpct"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.772214 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-catalog-content\") pod \"redhat-operators-ghpct\" (UID: \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\") " pod="openshift-marketplace/redhat-operators-ghpct"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.772271 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tx5p5\" (UniqueName: \"kubernetes.io/projected/ee40a1b4-967e-40aa-b6c0-eaf211346941-kube-api-access-tx5p5\") on node \"crc\" DevicePath \"\""
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.772284 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee40a1b4-967e-40aa-b6c0-eaf211346941-config-volume\") on node \"crc\" DevicePath \"\""
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.772297 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee40a1b4-967e-40aa-b6c0-eaf211346941-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.772677 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-utilities\") pod \"redhat-operators-ghpct\" (UID: \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\") " pod="openshift-marketplace/redhat-operators-ghpct"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.772694 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-catalog-content\") pod \"redhat-operators-ghpct\" (UID: \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\") " pod="openshift-marketplace/redhat-operators-ghpct"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.792033 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-967cg\" (UniqueName: \"kubernetes.io/projected/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-kube-api-access-967cg\") pod \"redhat-operators-ghpct\" (UID: \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\") " pod="openshift-marketplace/redhat-operators-ghpct"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.844727 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9lxqq"]
Jan 05 21:53:46 crc kubenswrapper[4910]: E0105 21:53:46.845229 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee40a1b4-967e-40aa-b6c0-eaf211346941" containerName="collect-profiles"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.845251 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee40a1b4-967e-40aa-b6c0-eaf211346941" containerName="collect-profiles"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.845371 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee40a1b4-967e-40aa-b6c0-eaf211346941" containerName="collect-profiles"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.846431 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9lxqq"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.854822 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ghpct"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.857576 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9lxqq"]
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.872926 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-db6wn\" (UniqueName: \"kubernetes.io/projected/554221c9-a077-40a9-a756-a9589d845ef7-kube-api-access-db6wn\") pod \"redhat-operators-9lxqq\" (UID: \"554221c9-a077-40a9-a756-a9589d845ef7\") " pod="openshift-marketplace/redhat-operators-9lxqq"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.873333 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/554221c9-a077-40a9-a756-a9589d845ef7-utilities\") pod \"redhat-operators-9lxqq\" (UID: \"554221c9-a077-40a9-a756-a9589d845ef7\") " pod="openshift-marketplace/redhat-operators-9lxqq"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.873393 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/554221c9-a077-40a9-a756-a9589d845ef7-catalog-content\") pod \"redhat-operators-9lxqq\" (UID: \"554221c9-a077-40a9-a756-a9589d845ef7\") " pod="openshift-marketplace/redhat-operators-9lxqq"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.974536 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-db6wn\" (UniqueName: \"kubernetes.io/projected/554221c9-a077-40a9-a756-a9589d845ef7-kube-api-access-db6wn\") pod \"redhat-operators-9lxqq\" (UID: \"554221c9-a077-40a9-a756-a9589d845ef7\") " pod="openshift-marketplace/redhat-operators-9lxqq"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.974600 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/554221c9-a077-40a9-a756-a9589d845ef7-utilities\") pod \"redhat-operators-9lxqq\" (UID: \"554221c9-a077-40a9-a756-a9589d845ef7\") " pod="openshift-marketplace/redhat-operators-9lxqq"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.974649 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/554221c9-a077-40a9-a756-a9589d845ef7-catalog-content\") pod \"redhat-operators-9lxqq\" (UID: \"554221c9-a077-40a9-a756-a9589d845ef7\") " pod="openshift-marketplace/redhat-operators-9lxqq"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.975445 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/554221c9-a077-40a9-a756-a9589d845ef7-catalog-content\") pod \"redhat-operators-9lxqq\" (UID: \"554221c9-a077-40a9-a756-a9589d845ef7\") " pod="openshift-marketplace/redhat-operators-9lxqq"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.975665 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/554221c9-a077-40a9-a756-a9589d845ef7-utilities\") pod \"redhat-operators-9lxqq\" (UID: \"554221c9-a077-40a9-a756-a9589d845ef7\") " pod="openshift-marketplace/redhat-operators-9lxqq"
Jan 05 21:53:46 crc kubenswrapper[4910]: I0105 21:53:46.995256 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-db6wn\" (UniqueName: \"kubernetes.io/projected/554221c9-a077-40a9-a756-a9589d845ef7-kube-api-access-db6wn\") pod \"redhat-operators-9lxqq\" (UID: \"554221c9-a077-40a9-a756-a9589d845ef7\") " pod="openshift-marketplace/redhat-operators-9lxqq"
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.054191 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.056832 4910 generic.go:334] "Generic (PLEG): container finished" podID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" containerID="a309e408734a42d7078f5bc3aec4bbca064fb3f412a82f1c4c92ee3c5f5f06f0" exitCode=0
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.056966 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzbmf" event={"ID":"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e","Type":"ContainerDied","Data":"a309e408734a42d7078f5bc3aec4bbca064fb3f412a82f1c4c92ee3c5f5f06f0"}
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.057024 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzbmf" event={"ID":"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e","Type":"ContainerStarted","Data":"fb58bf1adf8ea4e43e1d3966ba8c01bc94ae2842893145ffbf78e2f0d5ee6878"}
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.112484 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn"
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.113373 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn" event={"ID":"ee40a1b4-967e-40aa-b6c0-eaf211346941","Type":"ContainerDied","Data":"39fcb23d1bc88255d10b9dad6c2f83831fbf190d1312d6790dbbbbd18e55396e"}
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.113436 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39fcb23d1bc88255d10b9dad6c2f83831fbf190d1312d6790dbbbbd18e55396e"
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.148473 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bz5df" event={"ID":"aef8fca5-e47a-4942-8f59-42731aa77419","Type":"ContainerStarted","Data":"a86b1a05555c7de4cc3d6cf483140513db4ef46822b02d48e0b9a8cd7525588f"}
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.161474 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-bb2c8"
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.162909 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-f56kd"
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.175798 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-fcms5"
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.177725 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:47 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:47 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:47 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.177759 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.178995 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9lxqq"
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.302237 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ghpct"]
Jan 05 21:53:47 crc kubenswrapper[4910]: I0105 21:53:47.642504 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9lxqq"]
Jan 05 21:53:48 crc kubenswrapper[4910]: I0105 21:53:48.155700 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d","Type":"ContainerStarted","Data":"8d55f816a844fe02c54710d458c8bf7473581d6214d375da980cd1de499cea80"}
Jan 05 21:53:48 crc kubenswrapper[4910]: I0105 21:53:48.156108 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d","Type":"ContainerStarted","Data":"af11c586961696165d60eb6a15cc1b5d39c7a74f006a0e4cd76c42248f261e09"}
Jan 05 21:53:48 crc kubenswrapper[4910]: I0105 21:53:48.161852 4910 generic.go:334] "Generic (PLEG): container finished" podID="aef8fca5-e47a-4942-8f59-42731aa77419" containerID="1ad84bad5c219f9535fd051f0a04f2f99c192ffe95b26f01de4a34eac5bf1fc3" exitCode=0
Jan 05 21:53:48 crc kubenswrapper[4910]: I0105 21:53:48.161900 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bz5df" event={"ID":"aef8fca5-e47a-4942-8f59-42731aa77419","Type":"ContainerDied","Data":"1ad84bad5c219f9535fd051f0a04f2f99c192ffe95b26f01de4a34eac5bf1fc3"}
Jan 05 21:53:48 crc kubenswrapper[4910]: I0105 21:53:48.166714 4910 generic.go:334] "Generic (PLEG): container finished" podID="554221c9-a077-40a9-a756-a9589d845ef7" containerID="f5e807c853de6fd82b6a82099000901d1e1e169f7b09244bae2a6d5839bfcf53" exitCode=0
Jan 05 21:53:48 crc kubenswrapper[4910]: I0105 21:53:48.166761 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lxqq" event={"ID":"554221c9-a077-40a9-a756-a9589d845ef7","Type":"ContainerDied","Data":"f5e807c853de6fd82b6a82099000901d1e1e169f7b09244bae2a6d5839bfcf53"}
Jan 05 21:53:48 crc kubenswrapper[4910]: I0105 21:53:48.166778 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lxqq" event={"ID":"554221c9-a077-40a9-a756-a9589d845ef7","Type":"ContainerStarted","Data":"f076cdf5f22138c572d4d49f6ff430a935801ae00968f42d470a34972a2a01a1"}
Jan 05 21:53:48 crc kubenswrapper[4910]: I0105 21:53:48.177095 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.177079113 podStartE2EDuration="2.177079113s" podCreationTimestamp="2026-01-05 21:53:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:48.172912145 +0000 UTC m=+159.750409815" watchObservedRunningTime="2026-01-05 21:53:48.177079113 +0000 UTC m=+159.754576783"
Jan 05 21:53:48 crc kubenswrapper[4910]: I0105 21:53:48.179776 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:48 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:48 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:48 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:48 crc kubenswrapper[4910]: I0105 21:53:48.179839 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:48 crc kubenswrapper[4910]: I0105 21:53:48.180668 4910 generic.go:334] "Generic (PLEG): container finished" podID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" containerID="772a78573309727ad1a034b6ca1dd4290bfb901add28d3f53cafde3b78a60d2f" exitCode=0
Jan 05 21:53:48 crc kubenswrapper[4910]: I0105 21:53:48.181839 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ghpct" event={"ID":"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4","Type":"ContainerDied","Data":"772a78573309727ad1a034b6ca1dd4290bfb901add28d3f53cafde3b78a60d2f"}
Jan 05 21:53:48 crc kubenswrapper[4910]: I0105 21:53:48.181867 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ghpct" event={"ID":"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4","Type":"ContainerStarted","Data":"9d0d4a97b86bcf515ce503089ccc693022f42998c56ef744db627fd0576101dd"}
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.176705 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:49 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:49 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:49 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.177039 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.203949 4910 generic.go:334] "Generic (PLEG): container finished" podID="8f26ac8e-1e9a-4b95-8074-8bc02f7f041d" containerID="8d55f816a844fe02c54710d458c8bf7473581d6214d375da980cd1de499cea80" exitCode=0
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.204010 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d","Type":"ContainerDied","Data":"8d55f816a844fe02c54710d458c8bf7473581d6214d375da980cd1de499cea80"}
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.497800 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.499174 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.506313 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.508707 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.523851 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.651582 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bbf5e359-4e3e-4315-af00-fa32e8abeb46-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"bbf5e359-4e3e-4315-af00-fa32e8abeb46\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.651739 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bbf5e359-4e3e-4315-af00-fa32e8abeb46-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"bbf5e359-4e3e-4315-af00-fa32e8abeb46\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.753841 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bbf5e359-4e3e-4315-af00-fa32e8abeb46-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"bbf5e359-4e3e-4315-af00-fa32e8abeb46\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.753905 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bbf5e359-4e3e-4315-af00-fa32e8abeb46-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"bbf5e359-4e3e-4315-af00-fa32e8abeb46\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.754044 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bbf5e359-4e3e-4315-af00-fa32e8abeb46-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"bbf5e359-4e3e-4315-af00-fa32e8abeb46\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.778674 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bbf5e359-4e3e-4315-af00-fa32e8abeb46-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"bbf5e359-4e3e-4315-af00-fa32e8abeb46\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 05 21:53:49 crc kubenswrapper[4910]: I0105 21:53:49.825643 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 05 21:53:50 crc kubenswrapper[4910]: I0105 21:53:50.177456 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:50 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:50 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:50 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:50 crc kubenswrapper[4910]: I0105 21:53:50.177873 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:50 crc kubenswrapper[4910]: I0105 21:53:50.189776 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 05 21:53:50 crc kubenswrapper[4910]: W0105 21:53:50.215800 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podbbf5e359_4e3e_4315_af00_fa32e8abeb46.slice/crio-283d6e3c7e0640668b74ee20e4f1e2ab5b43a3dc77b53324dc8a5702e026edf9 WatchSource:0}: Error finding container 283d6e3c7e0640668b74ee20e4f1e2ab5b43a3dc77b53324dc8a5702e026edf9: Status 404 returned error can't find the container with id 283d6e3c7e0640668b74ee20e4f1e2ab5b43a3dc77b53324dc8a5702e026edf9
Jan 05 21:53:50 crc kubenswrapper[4910]: I0105 21:53:50.561213 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 05 21:53:50 crc kubenswrapper[4910]: I0105 21:53:50.667332 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8f26ac8e-1e9a-4b95-8074-8bc02f7f041d-kubelet-dir\") pod \"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d\" (UID: \"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d\") "
Jan 05 21:53:50 crc kubenswrapper[4910]: I0105 21:53:50.667497 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f26ac8e-1e9a-4b95-8074-8bc02f7f041d-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "8f26ac8e-1e9a-4b95-8074-8bc02f7f041d" (UID: "8f26ac8e-1e9a-4b95-8074-8bc02f7f041d"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 05 21:53:50 crc kubenswrapper[4910]: I0105 21:53:50.667557 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8f26ac8e-1e9a-4b95-8074-8bc02f7f041d-kube-api-access\") pod \"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d\" (UID: \"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d\") "
Jan 05 21:53:50 crc kubenswrapper[4910]: I0105 21:53:50.667858 4910 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8f26ac8e-1e9a-4b95-8074-8bc02f7f041d-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 05 21:53:50 crc kubenswrapper[4910]: I0105 21:53:50.673671 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f26ac8e-1e9a-4b95-8074-8bc02f7f041d-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "8f26ac8e-1e9a-4b95-8074-8bc02f7f041d" (UID: "8f26ac8e-1e9a-4b95-8074-8bc02f7f041d"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:53:50 crc kubenswrapper[4910]: I0105 21:53:50.769685 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8f26ac8e-1e9a-4b95-8074-8bc02f7f041d-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 05 21:53:51 crc kubenswrapper[4910]: I0105 21:53:51.233371 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:51 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:51 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:51 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:51 crc kubenswrapper[4910]: I0105 21:53:51.233449 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:51 crc kubenswrapper[4910]: I0105 21:53:51.370985 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 05 21:53:51 crc kubenswrapper[4910]: I0105 21:53:51.370985 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"8f26ac8e-1e9a-4b95-8074-8bc02f7f041d","Type":"ContainerDied","Data":"af11c586961696165d60eb6a15cc1b5d39c7a74f006a0e4cd76c42248f261e09"}
Jan 05 21:53:51 crc kubenswrapper[4910]: I0105 21:53:51.371382 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="af11c586961696165d60eb6a15cc1b5d39c7a74f006a0e4cd76c42248f261e09"
Jan 05 21:53:51 crc kubenswrapper[4910]: I0105 21:53:51.382232 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bbf5e359-4e3e-4315-af00-fa32e8abeb46","Type":"ContainerStarted","Data":"283d6e3c7e0640668b74ee20e4f1e2ab5b43a3dc77b53324dc8a5702e026edf9"}
Jan 05 21:53:52 crc kubenswrapper[4910]: I0105 21:53:52.176393 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:52 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:52 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:52 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:52 crc kubenswrapper[4910]: I0105 21:53:52.177271 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:52 crc kubenswrapper[4910]: I0105 21:53:52.395670 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-9pllm"
Jan 05 21:53:52 crc kubenswrapper[4910]: I0105 21:53:52.409398 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bbf5e359-4e3e-4315-af00-fa32e8abeb46","Type":"ContainerStarted","Data":"cd05e6e1928fed87ef563cceef292fae6656fdcb05c90fa7bae2ce84331fe47d"}
Jan 05 21:53:52 crc kubenswrapper[4910]: I0105 21:53:52.450056 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.450033249 podStartE2EDuration="3.450033249s" podCreationTimestamp="2026-01-05 21:53:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:53:52.449894465 +0000 UTC m=+164.027392135" watchObservedRunningTime="2026-01-05 21:53:52.450033249 +0000 UTC m=+164.027530919"
Jan 05 21:53:53 crc kubenswrapper[4910]: I0105 21:53:53.177534 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:53 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:53 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:53 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:53 crc kubenswrapper[4910]: I0105 21:53:53.177603 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:53 crc kubenswrapper[4910]: I0105 21:53:53.421405 4910 generic.go:334] "Generic (PLEG): container finished" podID="bbf5e359-4e3e-4315-af00-fa32e8abeb46" containerID="cd05e6e1928fed87ef563cceef292fae6656fdcb05c90fa7bae2ce84331fe47d" exitCode=0
Jan 05 21:53:53 crc kubenswrapper[4910]: I0105 21:53:53.421469 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bbf5e359-4e3e-4315-af00-fa32e8abeb46","Type":"ContainerDied","Data":"cd05e6e1928fed87ef563cceef292fae6656fdcb05c90fa7bae2ce84331fe47d"}
Jan 05 21:53:54 crc kubenswrapper[4910]: I0105 21:53:54.176282 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:54 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:54 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:54 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:54 crc kubenswrapper[4910]: I0105 21:53:54.176652 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:54 crc kubenswrapper[4910]: I0105 21:53:54.180169 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n"
Jan 05 21:53:54 crc kubenswrapper[4910]: I0105 21:53:54.199647 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/74c455b1-4706-4ca7-bd82-2b99c3c83e3f-metrics-certs\") pod \"network-metrics-daemon-mns6n\" (UID: \"74c455b1-4706-4ca7-bd82-2b99c3c83e3f\") " pod="openshift-multus/network-metrics-daemon-mns6n"
Jan 05 21:53:54 crc kubenswrapper[4910]: I0105 21:53:54.450397 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mns6n"
Jan 05 21:53:55 crc kubenswrapper[4910]: I0105 21:53:55.176338 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:55 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:55 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:55 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:55 crc kubenswrapper[4910]: I0105 21:53:55.176776 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:56 crc kubenswrapper[4910]: I0105 21:53:56.176263 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:56 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:56 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:56 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:56 crc kubenswrapper[4910]: I0105 21:53:56.176358 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 05 21:53:56 crc kubenswrapper[4910]: I0105 21:53:56.257408 4910 patch_prober.go:28] interesting pod/console-f9d7485db-g5xxj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body=
Jan 05 21:53:56 crc kubenswrapper[4910]: I0105 21:53:56.257465 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-g5xxj" podUID="8cef9cdb-d8f8-406b-8575-6a6d1b72a638" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused"
Jan 05 21:53:56 crc kubenswrapper[4910]: I0105 21:53:56.484986 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-k2d98"
Jan 05 21:53:57 crc kubenswrapper[4910]: I0105 21:53:57.176544 4910 patch_prober.go:28] interesting pod/router-default-5444994796-fcms5 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 05 21:53:57 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Jan 05 21:53:57 crc kubenswrapper[4910]: [+]process-running ok
Jan 05 21:53:57 crc kubenswrapper[4910]: healthz check failed
Jan 05 21:53:57 crc kubenswrapper[4910]: I0105 21:53:57.176640 4910 prober.go:107] "Probe
failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-fcms5" podUID="3b0afa0a-d1fe-4c63-a25f-3fd39b954817" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 05 21:53:58 crc kubenswrapper[4910]: I0105 21:53:58.179190 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:58 crc kubenswrapper[4910]: I0105 21:53:58.182061 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-fcms5" Jan 05 21:53:58 crc kubenswrapper[4910]: I0105 21:53:58.483651 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-samples-operator_cluster-samples-operator-665b6dd947-9tbp2_c897d56d-7140-4aae-b1df-288502d6c78c/cluster-samples-operator/0.log" Jan 05 21:53:58 crc kubenswrapper[4910]: I0105 21:53:58.483730 4910 generic.go:334] "Generic (PLEG): container finished" podID="c897d56d-7140-4aae-b1df-288502d6c78c" containerID="709286d28f5bf7f761aefdd1b0081ee59edec6981b2e968731b0fedf7f7a4b29" exitCode=2 Jan 05 21:53:58 crc kubenswrapper[4910]: I0105 21:53:58.483858 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2" event={"ID":"c897d56d-7140-4aae-b1df-288502d6c78c","Type":"ContainerDied","Data":"709286d28f5bf7f761aefdd1b0081ee59edec6981b2e968731b0fedf7f7a4b29"} Jan 05 21:53:58 crc kubenswrapper[4910]: I0105 21:53:58.485075 4910 scope.go:117] "RemoveContainer" containerID="709286d28f5bf7f761aefdd1b0081ee59edec6981b2e968731b0fedf7f7a4b29" Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.472659 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.492351 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bbf5e359-4e3e-4315-af00-fa32e8abeb46","Type":"ContainerDied","Data":"283d6e3c7e0640668b74ee20e4f1e2ab5b43a3dc77b53324dc8a5702e026edf9"} Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.492400 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="283d6e3c7e0640668b74ee20e4f1e2ab5b43a3dc77b53324dc8a5702e026edf9" Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.492404 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.583232 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-8z8h7"] Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.584178 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" podUID="f869ba01-9cc5-403c-a234-7a6e4864c8fb" containerName="controller-manager" containerID="cri-o://d60e7239fccd4b14e2bdf0aaf7ec48b4cedc6353b047502aace196e9f76c5392" gracePeriod=30 Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.591701 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bbf5e359-4e3e-4315-af00-fa32e8abeb46-kubelet-dir\") pod \"bbf5e359-4e3e-4315-af00-fa32e8abeb46\" (UID: \"bbf5e359-4e3e-4315-af00-fa32e8abeb46\") " Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.591787 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bbf5e359-4e3e-4315-af00-fa32e8abeb46-kube-api-access\") pod \"bbf5e359-4e3e-4315-af00-fa32e8abeb46\" (UID: \"bbf5e359-4e3e-4315-af00-fa32e8abeb46\") " Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.591861 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bbf5e359-4e3e-4315-af00-fa32e8abeb46-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "bbf5e359-4e3e-4315-af00-fa32e8abeb46" (UID: "bbf5e359-4e3e-4315-af00-fa32e8abeb46"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.592242 4910 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bbf5e359-4e3e-4315-af00-fa32e8abeb46-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.609415 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbf5e359-4e3e-4315-af00-fa32e8abeb46-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "bbf5e359-4e3e-4315-af00-fa32e8abeb46" (UID: "bbf5e359-4e3e-4315-af00-fa32e8abeb46"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.614468 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp"] Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.614856 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" podUID="69e2d768-6b62-446e-a239-4b221ba0a979" containerName="route-controller-manager" containerID="cri-o://3348600e07318dda31c69edea321f0b86c2c68c72996f165b861a5de4af3df47" gracePeriod=30 Jan 05 21:53:59 crc kubenswrapper[4910]: I0105 21:53:59.693271 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bbf5e359-4e3e-4315-af00-fa32e8abeb46-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:00 crc kubenswrapper[4910]: I0105 21:54:00.501844 4910 generic.go:334] "Generic (PLEG): container finished" podID="f869ba01-9cc5-403c-a234-7a6e4864c8fb" containerID="d60e7239fccd4b14e2bdf0aaf7ec48b4cedc6353b047502aace196e9f76c5392" exitCode=0 Jan 05 21:54:00 crc kubenswrapper[4910]: I0105 21:54:00.501920 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" event={"ID":"f869ba01-9cc5-403c-a234-7a6e4864c8fb","Type":"ContainerDied","Data":"d60e7239fccd4b14e2bdf0aaf7ec48b4cedc6353b047502aace196e9f76c5392"} Jan 05 21:54:00 crc kubenswrapper[4910]: I0105 21:54:00.505195 4910 generic.go:334] "Generic (PLEG): container finished" podID="69e2d768-6b62-446e-a239-4b221ba0a979" containerID="3348600e07318dda31c69edea321f0b86c2c68c72996f165b861a5de4af3df47" exitCode=0 Jan 05 21:54:00 crc kubenswrapper[4910]: I0105 21:54:00.505235 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" event={"ID":"69e2d768-6b62-446e-a239-4b221ba0a979","Type":"ContainerDied","Data":"3348600e07318dda31c69edea321f0b86c2c68c72996f165b861a5de4af3df47"} Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.371531 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.806683 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.817798 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.837592 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-578cc95486-jckcg"] Jan 05 21:54:05 crc kubenswrapper[4910]: E0105 21:54:05.837828 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbf5e359-4e3e-4315-af00-fa32e8abeb46" containerName="pruner" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.837842 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbf5e359-4e3e-4315-af00-fa32e8abeb46" containerName="pruner" Jan 05 21:54:05 crc kubenswrapper[4910]: E0105 21:54:05.837859 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f26ac8e-1e9a-4b95-8074-8bc02f7f041d" containerName="pruner" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.837864 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f26ac8e-1e9a-4b95-8074-8bc02f7f041d" containerName="pruner" Jan 05 21:54:05 crc kubenswrapper[4910]: E0105 21:54:05.837875 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f869ba01-9cc5-403c-a234-7a6e4864c8fb" containerName="controller-manager" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.837882 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f869ba01-9cc5-403c-a234-7a6e4864c8fb" containerName="controller-manager" Jan 05 21:54:05 crc kubenswrapper[4910]: E0105 21:54:05.837894 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69e2d768-6b62-446e-a239-4b221ba0a979" containerName="route-controller-manager" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.837900 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="69e2d768-6b62-446e-a239-4b221ba0a979" containerName="route-controller-manager" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.838001 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="69e2d768-6b62-446e-a239-4b221ba0a979" containerName="route-controller-manager" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.838019 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbf5e359-4e3e-4315-af00-fa32e8abeb46" containerName="pruner" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.838028 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f869ba01-9cc5-403c-a234-7a6e4864c8fb" containerName="controller-manager" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.838036 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f26ac8e-1e9a-4b95-8074-8bc02f7f041d" containerName="pruner" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.838456 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.860374 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-578cc95486-jckcg"] Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.896649 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69e2d768-6b62-446e-a239-4b221ba0a979-serving-cert\") pod \"69e2d768-6b62-446e-a239-4b221ba0a979\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.896856 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-proxy-ca-bundles\") pod \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.896926 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmxxh\" (UniqueName: \"kubernetes.io/projected/69e2d768-6b62-446e-a239-4b221ba0a979-kube-api-access-zmxxh\") pod \"69e2d768-6b62-446e-a239-4b221ba0a979\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.896954 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69e2d768-6b62-446e-a239-4b221ba0a979-config\") pod \"69e2d768-6b62-446e-a239-4b221ba0a979\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.897806 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "f869ba01-9cc5-403c-a234-7a6e4864c8fb" (UID: "f869ba01-9cc5-403c-a234-7a6e4864c8fb"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.897888 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bk7j\" (UniqueName: \"kubernetes.io/projected/f869ba01-9cc5-403c-a234-7a6e4864c8fb-kube-api-access-9bk7j\") pod \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.897862 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69e2d768-6b62-446e-a239-4b221ba0a979-config" (OuterVolumeSpecName: "config") pod "69e2d768-6b62-446e-a239-4b221ba0a979" (UID: "69e2d768-6b62-446e-a239-4b221ba0a979"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.897963 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-client-ca\") pod \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.898017 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-config\") pod \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.898047 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/69e2d768-6b62-446e-a239-4b221ba0a979-client-ca\") pod \"69e2d768-6b62-446e-a239-4b221ba0a979\" (UID: \"69e2d768-6b62-446e-a239-4b221ba0a979\") " Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.898091 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f869ba01-9cc5-403c-a234-7a6e4864c8fb-serving-cert\") pod \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\" (UID: \"f869ba01-9cc5-403c-a234-7a6e4864c8fb\") " Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.898706 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-client-ca\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.898823 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-config" (OuterVolumeSpecName: "config") pod "f869ba01-9cc5-403c-a234-7a6e4864c8fb" (UID: "f869ba01-9cc5-403c-a234-7a6e4864c8fb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.898866 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-config\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.898865 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69e2d768-6b62-446e-a239-4b221ba0a979-client-ca" (OuterVolumeSpecName: "client-ca") pod "69e2d768-6b62-446e-a239-4b221ba0a979" (UID: "69e2d768-6b62-446e-a239-4b221ba0a979"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.898990 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkg4f\" (UniqueName: \"kubernetes.io/projected/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-kube-api-access-gkg4f\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.899021 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-proxy-ca-bundles\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.899041 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-serving-cert\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.899355 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.899377 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69e2d768-6b62-446e-a239-4b221ba0a979-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.899387 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.899398 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/69e2d768-6b62-446e-a239-4b221ba0a979-client-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.903785 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-client-ca" (OuterVolumeSpecName: "client-ca") pod "f869ba01-9cc5-403c-a234-7a6e4864c8fb" (UID: "f869ba01-9cc5-403c-a234-7a6e4864c8fb"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.904318 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f869ba01-9cc5-403c-a234-7a6e4864c8fb-kube-api-access-9bk7j" (OuterVolumeSpecName: "kube-api-access-9bk7j") pod "f869ba01-9cc5-403c-a234-7a6e4864c8fb" (UID: "f869ba01-9cc5-403c-a234-7a6e4864c8fb"). InnerVolumeSpecName "kube-api-access-9bk7j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.909659 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69e2d768-6b62-446e-a239-4b221ba0a979-kube-api-access-zmxxh" (OuterVolumeSpecName: "kube-api-access-zmxxh") pod "69e2d768-6b62-446e-a239-4b221ba0a979" (UID: "69e2d768-6b62-446e-a239-4b221ba0a979"). InnerVolumeSpecName "kube-api-access-zmxxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.910335 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69e2d768-6b62-446e-a239-4b221ba0a979-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "69e2d768-6b62-446e-a239-4b221ba0a979" (UID: "69e2d768-6b62-446e-a239-4b221ba0a979"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:54:05 crc kubenswrapper[4910]: I0105 21:54:05.911353 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f869ba01-9cc5-403c-a234-7a6e4864c8fb-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f869ba01-9cc5-403c-a234-7a6e4864c8fb" (UID: "f869ba01-9cc5-403c-a234-7a6e4864c8fb"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.001560 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-client-ca\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.001643 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-config\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.001673 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkg4f\" (UniqueName: \"kubernetes.io/projected/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-kube-api-access-gkg4f\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.001732 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-proxy-ca-bundles\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.001766 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-serving-cert\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.001913 4910 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmxxh\" (UniqueName: \"kubernetes.io/projected/69e2d768-6b62-446e-a239-4b221ba0a979-kube-api-access-zmxxh\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.001935 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bk7j\" (UniqueName: \"kubernetes.io/projected/f869ba01-9cc5-403c-a234-7a6e4864c8fb-kube-api-access-9bk7j\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.001970 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f869ba01-9cc5-403c-a234-7a6e4864c8fb-client-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.001983 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f869ba01-9cc5-403c-a234-7a6e4864c8fb-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.001996 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69e2d768-6b62-446e-a239-4b221ba0a979-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.003553 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-proxy-ca-bundles\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.003607 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-config\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.006642 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-serving-cert\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.029841 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkg4f\" (UniqueName: \"kubernetes.io/projected/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-kube-api-access-gkg4f\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.063979 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-client-ca\") pod \"controller-manager-578cc95486-jckcg\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.157501 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.261973 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.266700 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.542689 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" event={"ID":"f869ba01-9cc5-403c-a234-7a6e4864c8fb","Type":"ContainerDied","Data":"15dfeafb5af7724704b54b6f3131908006772438d52ac5c7d72479889a54d0af"} Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.542761 4910 scope.go:117] "RemoveContainer" containerID="d60e7239fccd4b14e2bdf0aaf7ec48b4cedc6353b047502aace196e9f76c5392" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.542894 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-8z8h7" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.547640 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" event={"ID":"69e2d768-6b62-446e-a239-4b221ba0a979","Type":"ContainerDied","Data":"48be7083d499487cea19d82570c936c6794cffbdfa1cad1470f8d110b35b112a"} Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.547685 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.583168 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-8z8h7"] Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.590318 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-8z8h7"] Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.595713 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp"] Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.612964 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-82zlp"] Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.732098 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69e2d768-6b62-446e-a239-4b221ba0a979" path="/var/lib/kubelet/pods/69e2d768-6b62-446e-a239-4b221ba0a979/volumes" Jan 05 21:54:06 crc kubenswrapper[4910]: I0105 21:54:06.732662 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f869ba01-9cc5-403c-a234-7a6e4864c8fb" path="/var/lib/kubelet/pods/f869ba01-9cc5-403c-a234-7a6e4864c8fb/volumes" Jan 05 21:54:07 crc kubenswrapper[4910]: I0105 21:54:07.896782 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942"] Jan 05 21:54:07 crc kubenswrapper[4910]: I0105 21:54:07.898950 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:07 crc kubenswrapper[4910]: I0105 21:54:07.902334 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 05 21:54:07 crc kubenswrapper[4910]: I0105 21:54:07.902458 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 05 21:54:07 crc kubenswrapper[4910]: I0105 21:54:07.902556 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 05 21:54:07 crc kubenswrapper[4910]: I0105 21:54:07.902580 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 05 21:54:07 crc kubenswrapper[4910]: I0105 21:54:07.902587 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 05 21:54:07 crc kubenswrapper[4910]: I0105 21:54:07.902750 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 05 21:54:07 crc kubenswrapper[4910]: I0105 21:54:07.912818 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942"] Jan 05 21:54:07 crc kubenswrapper[4910]: I0105 21:54:07.929951 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-config\") pod \"route-controller-manager-798bfbcf46-jz942\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:07 crc kubenswrapper[4910]: I0105 21:54:07.929999 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-client-ca\") pod \"route-controller-manager-798bfbcf46-jz942\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:07 crc kubenswrapper[4910]: I0105 21:54:07.930164 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-serving-cert\") pod \"route-controller-manager-798bfbcf46-jz942\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:07 crc kubenswrapper[4910]: I0105 21:54:07.930530 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89zvj\" (UniqueName: \"kubernetes.io/projected/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-kube-api-access-89zvj\") pod \"route-controller-manager-798bfbcf46-jz942\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:08 crc kubenswrapper[4910]: I0105 21:54:08.031794 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-serving-cert\") pod 
\"route-controller-manager-798bfbcf46-jz942\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:08 crc kubenswrapper[4910]: I0105 21:54:08.031927 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89zvj\" (UniqueName: \"kubernetes.io/projected/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-kube-api-access-89zvj\") pod \"route-controller-manager-798bfbcf46-jz942\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:08 crc kubenswrapper[4910]: I0105 21:54:08.032399 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-config\") pod \"route-controller-manager-798bfbcf46-jz942\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:08 crc kubenswrapper[4910]: I0105 21:54:08.032439 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-client-ca\") pod \"route-controller-manager-798bfbcf46-jz942\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:08 crc kubenswrapper[4910]: I0105 21:54:08.033539 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-client-ca\") pod \"route-controller-manager-798bfbcf46-jz942\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:08 crc kubenswrapper[4910]: I0105 21:54:08.033598 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-config\") pod \"route-controller-manager-798bfbcf46-jz942\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:08 crc kubenswrapper[4910]: I0105 21:54:08.038267 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-serving-cert\") pod \"route-controller-manager-798bfbcf46-jz942\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:08 crc kubenswrapper[4910]: I0105 21:54:08.051481 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89zvj\" (UniqueName: \"kubernetes.io/projected/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-kube-api-access-89zvj\") pod \"route-controller-manager-798bfbcf46-jz942\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:08 crc kubenswrapper[4910]: I0105 21:54:08.238693 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:10 crc kubenswrapper[4910]: I0105 21:54:10.952266 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 21:54:10 crc kubenswrapper[4910]: I0105 21:54:10.952654 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 21:54:12 crc kubenswrapper[4910]: E0105 21:54:12.500392 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 05 21:54:12 crc kubenswrapper[4910]: E0105 21:54:12.502171 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zsb2f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-2hg8l_openshift-marketplace(340fecda-72dc-4870-887a-29b5ef58ae94): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 05 21:54:12 crc kubenswrapper[4910]: E0105 21:54:12.503444 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-2hg8l" podUID="340fecda-72dc-4870-887a-29b5ef58ae94" Jan 05 21:54:15 crc kubenswrapper[4910]: I0105 21:54:15.533152 4910 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 05 21:54:15 crc kubenswrapper[4910]: E0105 21:54:15.581657 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-2hg8l" podUID="340fecda-72dc-4870-887a-29b5ef58ae94" Jan 05 21:54:17 crc kubenswrapper[4910]: I0105 21:54:17.008149 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jwxvn" Jan 05 21:54:19 crc kubenswrapper[4910]: I0105 21:54:19.516988 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-578cc95486-jckcg"] Jan 05 21:54:19 crc kubenswrapper[4910]: I0105 21:54:19.614789 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942"] Jan 05 21:54:21 crc kubenswrapper[4910]: E0105 21:54:21.096107 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 05 21:54:21 crc kubenswrapper[4910]: E0105 21:54:21.096630 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4qfsm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-gnvct_openshift-marketplace(93cdd0f0-6faf-4d13-b090-21afa1ae8f76): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 05 21:54:21 crc kubenswrapper[4910]: E0105 21:54:21.097715 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: 
\"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-gnvct" podUID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.758824 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-gnvct" podUID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" Jan 05 21:54:22 crc kubenswrapper[4910]: I0105 21:54:22.791043 4910 scope.go:117] "RemoveContainer" containerID="3348600e07318dda31c69edea321f0b86c2c68c72996f165b861a5de4af3df47" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.862412 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.862910 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8gqh6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-bz5df_openshift-marketplace(aef8fca5-e47a-4942-8f59-42731aa77419): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.864209 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-bz5df" podUID="aef8fca5-e47a-4942-8f59-42731aa77419" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.892487 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = 
Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.892674 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mvgsn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-pzbmf_openshift-marketplace(e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.894189 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.894299 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8ccnl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-wbhgr_openshift-marketplace(e6181ab2-b292-4e7d-b30e-ec724946700c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.895090 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-pzbmf" podUID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.898371 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-wbhgr" podUID="e6181ab2-b292-4e7d-b30e-ec724946700c" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.921289 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.921454 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-967cg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-ghpct_openshift-marketplace(060b3be3-5d9d-47dc-a01e-7a79aa9f13b4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.922656 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-ghpct" podUID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.957666 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.957836 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z7xvk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-7tvk2_openshift-marketplace(e67293c9-fc75-468d-b1c5-c09f9ad46dda): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 05 21:54:22 crc kubenswrapper[4910]: E0105 21:54:22.959544 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-7tvk2" podUID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.080508 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-mns6n"] Jan 05 21:54:23 crc kubenswrapper[4910]: W0105 21:54:23.085201 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74c455b1_4706_4ca7_bd82_2b99c3c83e3f.slice/crio-e47284d805f38aa9e7b6f085e30d74aefebd0bf40af3c57071b9b6eb256bce32 WatchSource:0}: Error finding container e47284d805f38aa9e7b6f085e30d74aefebd0bf40af3c57071b9b6eb256bce32: Status 404 returned error can't find the container with id e47284d805f38aa9e7b6f085e30d74aefebd0bf40af3c57071b9b6eb256bce32 Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.339346 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942"] Jan 05 21:54:23 crc kubenswrapper[4910]: W0105 21:54:23.350464 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff8e1a68_ed73_4082_bdee_5bfd2ec60cf0.slice/crio-596c7683801c081b66e88fcea70d36bae88b4a11e7f14acd94300404f49855b8 WatchSource:0}: Error finding container 596c7683801c081b66e88fcea70d36bae88b4a11e7f14acd94300404f49855b8: Status 404 returned error can't find the container with id 596c7683801c081b66e88fcea70d36bae88b4a11e7f14acd94300404f49855b8 Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.353907 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-controller-manager/controller-manager-578cc95486-jckcg"] Jan 05 21:54:23 crc kubenswrapper[4910]: W0105 21:54:23.360471 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b60d123_f3ba_4ee7_81c1_b424bec0dc8b.slice/crio-6873d7d32925839c8d4b12d4b196b446cf2dbe1a1e4a5d7861c1612c4717a6bf WatchSource:0}: Error finding container 6873d7d32925839c8d4b12d4b196b446cf2dbe1a1e4a5d7861c1612c4717a6bf: Status 404 returned error can't find the container with id 6873d7d32925839c8d4b12d4b196b446cf2dbe1a1e4a5d7861c1612c4717a6bf Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.652708 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" event={"ID":"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b","Type":"ContainerStarted","Data":"e88123560b937517acfc67410c2c32103a584f41a90b19ca55f004e6f0d26539"} Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.653178 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.653193 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" event={"ID":"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b","Type":"ContainerStarted","Data":"6873d7d32925839c8d4b12d4b196b446cf2dbe1a1e4a5d7861c1612c4717a6bf"} Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.652797 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" podUID="4b60d123-f3ba-4ee7-81c1-b424bec0dc8b" containerName="controller-manager" containerID="cri-o://e88123560b937517acfc67410c2c32103a584f41a90b19ca55f004e6f0d26539" gracePeriod=30 Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.657799 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-mns6n" event={"ID":"74c455b1-4706-4ca7-bd82-2b99c3c83e3f","Type":"ContainerStarted","Data":"4a69247939dfed8ab0a2bae136894ab162b724cf6892ac6d2cdcb38185752417"} Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.657875 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-mns6n" event={"ID":"74c455b1-4706-4ca7-bd82-2b99c3c83e3f","Type":"ContainerStarted","Data":"279cd177a0281e9cc29664ffcf001a5b52a6749765af98618b6abc1b775ea77b"} Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.657896 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-mns6n" event={"ID":"74c455b1-4706-4ca7-bd82-2b99c3c83e3f","Type":"ContainerStarted","Data":"e47284d805f38aa9e7b6f085e30d74aefebd0bf40af3c57071b9b6eb256bce32"} Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.658775 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.664980 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-samples-operator_cluster-samples-operator-665b6dd947-9tbp2_c897d56d-7140-4aae-b1df-288502d6c78c/cluster-samples-operator/0.log" Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.665063 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9tbp2" 
event={"ID":"c897d56d-7140-4aae-b1df-288502d6c78c","Type":"ContainerStarted","Data":"387eecdec072b7afded33ac9332ef361f849a7bb16189c99bd26a7e9c4d17301"} Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.672315 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lxqq" event={"ID":"554221c9-a077-40a9-a756-a9589d845ef7","Type":"ContainerStarted","Data":"dbcfb43121fcf101d940c2147031039f48304a43c8c9b68f2a9f048e65875d1c"} Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.673455 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" podStartSLOduration=24.673428413 podStartE2EDuration="24.673428413s" podCreationTimestamp="2026-01-05 21:53:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:54:23.67169963 +0000 UTC m=+195.249197300" watchObservedRunningTime="2026-01-05 21:54:23.673428413 +0000 UTC m=+195.250926083" Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.683969 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" event={"ID":"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0","Type":"ContainerStarted","Data":"b7bf73ba4221bece8cb8fc240d4ab64c3849322825320e55f7cfce713a5e0dfa"} Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.684016 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" event={"ID":"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0","Type":"ContainerStarted","Data":"596c7683801c081b66e88fcea70d36bae88b4a11e7f14acd94300404f49855b8"} Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.684094 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" podUID="ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0" containerName="route-controller-manager" containerID="cri-o://b7bf73ba4221bece8cb8fc240d4ab64c3849322825320e55f7cfce713a5e0dfa" gracePeriod=30 Jan 05 21:54:23 crc kubenswrapper[4910]: E0105 21:54:23.685024 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-7tvk2" podUID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" Jan 05 21:54:23 crc kubenswrapper[4910]: E0105 21:54:23.685546 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-pzbmf" podUID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" Jan 05 21:54:23 crc kubenswrapper[4910]: E0105 21:54:23.685700 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-wbhgr" podUID="e6181ab2-b292-4e7d-b30e-ec724946700c" Jan 05 21:54:23 crc kubenswrapper[4910]: E0105 21:54:23.685883 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with 
ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-ghpct" podUID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" Jan 05 21:54:23 crc kubenswrapper[4910]: E0105 21:54:23.686293 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-bz5df" podUID="aef8fca5-e47a-4942-8f59-42731aa77419" Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.697110 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-mns6n" podStartSLOduration=172.697086167 podStartE2EDuration="2m52.697086167s" podCreationTimestamp="2026-01-05 21:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:54:23.690281549 +0000 UTC m=+195.267779219" watchObservedRunningTime="2026-01-05 21:54:23.697086167 +0000 UTC m=+195.274583837" Jan 05 21:54:23 crc kubenswrapper[4910]: I0105 21:54:23.882928 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" podStartSLOduration=24.882910889 podStartE2EDuration="24.882910889s" podCreationTimestamp="2026-01-05 21:53:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:54:23.881420053 +0000 UTC m=+195.458917723" watchObservedRunningTime="2026-01-05 21:54:23.882910889 +0000 UTC m=+195.460408559" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.032026 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.070530 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-c9bcd9746-bwbfh"] Jan 05 21:54:24 crc kubenswrapper[4910]: E0105 21:54:24.071530 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b60d123-f3ba-4ee7-81c1-b424bec0dc8b" containerName="controller-manager" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.071584 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b60d123-f3ba-4ee7-81c1-b424bec0dc8b" containerName="controller-manager" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.071756 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b60d123-f3ba-4ee7-81c1-b424bec0dc8b" containerName="controller-manager" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.072387 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.082662 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-c9bcd9746-bwbfh"] Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.138272 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-client-ca\") pod \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.138376 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-config\") pod \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.138456 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-proxy-ca-bundles\") pod \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.138553 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkg4f\" (UniqueName: \"kubernetes.io/projected/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-kube-api-access-gkg4f\") pod \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.138585 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-serving-cert\") pod \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\" (UID: \"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b\") " Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.138862 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-proxy-ca-bundles\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.138892 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-config\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.138931 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b7102a7-347a-4025-bc40-6c78fddf35af-serving-cert\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.138958 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-client-ca\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.140185 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "4b60d123-f3ba-4ee7-81c1-b424bec0dc8b" (UID: "4b60d123-f3ba-4ee7-81c1-b424bec0dc8b"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.140297 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-config" (OuterVolumeSpecName: "config") pod "4b60d123-f3ba-4ee7-81c1-b424bec0dc8b" (UID: "4b60d123-f3ba-4ee7-81c1-b424bec0dc8b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.140360 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpzs4\" (UniqueName: \"kubernetes.io/projected/1b7102a7-347a-4025-bc40-6c78fddf35af-kube-api-access-rpzs4\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.140462 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.140697 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-client-ca" (OuterVolumeSpecName: "client-ca") pod "4b60d123-f3ba-4ee7-81c1-b424bec0dc8b" (UID: "4b60d123-f3ba-4ee7-81c1-b424bec0dc8b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.148105 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-kube-api-access-gkg4f" (OuterVolumeSpecName: "kube-api-access-gkg4f") pod "4b60d123-f3ba-4ee7-81c1-b424bec0dc8b" (UID: "4b60d123-f3ba-4ee7-81c1-b424bec0dc8b"). InnerVolumeSpecName "kube-api-access-gkg4f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.148598 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4b60d123-f3ba-4ee7-81c1-b424bec0dc8b" (UID: "4b60d123-f3ba-4ee7-81c1-b424bec0dc8b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.174280 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-798bfbcf46-jz942_ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0/route-controller-manager/0.log" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.174362 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.241536 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89zvj\" (UniqueName: \"kubernetes.io/projected/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-kube-api-access-89zvj\") pod \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.241594 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-client-ca\") pod \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.241631 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-serving-cert\") pod \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.241698 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-config\") pod \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\" (UID: \"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0\") " Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.241879 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-proxy-ca-bundles\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.241906 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-config\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.241942 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b7102a7-347a-4025-bc40-6c78fddf35af-serving-cert\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.241967 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-client-ca\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.241984 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpzs4\" (UniqueName: \"kubernetes.io/projected/1b7102a7-347a-4025-bc40-6c78fddf35af-kube-api-access-rpzs4\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " 
pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.242053 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.242067 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-client-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.242076 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.242088 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkg4f\" (UniqueName: \"kubernetes.io/projected/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b-kube-api-access-gkg4f\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.242605 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-client-ca" (OuterVolumeSpecName: "client-ca") pod "ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0" (UID: "ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.243625 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-client-ca\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.244198 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-config\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.244237 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-proxy-ca-bundles\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.244238 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-config" (OuterVolumeSpecName: "config") pod "ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0" (UID: "ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.245717 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0" (UID: "ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.246097 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b7102a7-347a-4025-bc40-6c78fddf35af-serving-cert\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.246538 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-kube-api-access-89zvj" (OuterVolumeSpecName: "kube-api-access-89zvj") pod "ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0" (UID: "ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0"). InnerVolumeSpecName "kube-api-access-89zvj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.262022 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpzs4\" (UniqueName: \"kubernetes.io/projected/1b7102a7-347a-4025-bc40-6c78fddf35af-kube-api-access-rpzs4\") pod \"controller-manager-c9bcd9746-bwbfh\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.343379 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.343424 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89zvj\" (UniqueName: \"kubernetes.io/projected/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-kube-api-access-89zvj\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.343438 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-client-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.343448 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.400227 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.613870 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-c9bcd9746-bwbfh"] Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.700849 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" event={"ID":"1b7102a7-347a-4025-bc40-6c78fddf35af","Type":"ContainerStarted","Data":"b6b8d31f3c477f62f2054029b7c0c4693b8daf6f336a427b33b2becbe73488b5"} Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.702578 4910 generic.go:334] "Generic (PLEG): container finished" podID="4b60d123-f3ba-4ee7-81c1-b424bec0dc8b" containerID="e88123560b937517acfc67410c2c32103a584f41a90b19ca55f004e6f0d26539" exitCode=0 Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.702658 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.702672 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" event={"ID":"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b","Type":"ContainerDied","Data":"e88123560b937517acfc67410c2c32103a584f41a90b19ca55f004e6f0d26539"} Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.702709 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-578cc95486-jckcg" event={"ID":"4b60d123-f3ba-4ee7-81c1-b424bec0dc8b","Type":"ContainerDied","Data":"6873d7d32925839c8d4b12d4b196b446cf2dbe1a1e4a5d7861c1612c4717a6bf"} Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.702729 4910 scope.go:117] "RemoveContainer" containerID="e88123560b937517acfc67410c2c32103a584f41a90b19ca55f004e6f0d26539" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.705903 4910 generic.go:334] "Generic (PLEG): container finished" podID="554221c9-a077-40a9-a756-a9589d845ef7" containerID="dbcfb43121fcf101d940c2147031039f48304a43c8c9b68f2a9f048e65875d1c" exitCode=0 Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.705946 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lxqq" event={"ID":"554221c9-a077-40a9-a756-a9589d845ef7","Type":"ContainerDied","Data":"dbcfb43121fcf101d940c2147031039f48304a43c8c9b68f2a9f048e65875d1c"} Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.711221 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-798bfbcf46-jz942_ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0/route-controller-manager/0.log" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.711299 4910 generic.go:334] "Generic (PLEG): container finished" podID="ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0" containerID="b7bf73ba4221bece8cb8fc240d4ab64c3849322825320e55f7cfce713a5e0dfa" exitCode=255 Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.711373 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.711376 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" event={"ID":"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0","Type":"ContainerDied","Data":"b7bf73ba4221bece8cb8fc240d4ab64c3849322825320e55f7cfce713a5e0dfa"} Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.711472 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942" event={"ID":"ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0","Type":"ContainerDied","Data":"596c7683801c081b66e88fcea70d36bae88b4a11e7f14acd94300404f49855b8"} Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.728105 4910 scope.go:117] "RemoveContainer" containerID="e88123560b937517acfc67410c2c32103a584f41a90b19ca55f004e6f0d26539" Jan 05 21:54:24 crc kubenswrapper[4910]: E0105 21:54:24.730105 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e88123560b937517acfc67410c2c32103a584f41a90b19ca55f004e6f0d26539\": container with ID starting with e88123560b937517acfc67410c2c32103a584f41a90b19ca55f004e6f0d26539 not found: ID does not exist" containerID="e88123560b937517acfc67410c2c32103a584f41a90b19ca55f004e6f0d26539" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.730173 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e88123560b937517acfc67410c2c32103a584f41a90b19ca55f004e6f0d26539"} err="failed to get container status \"e88123560b937517acfc67410c2c32103a584f41a90b19ca55f004e6f0d26539\": rpc error: code = NotFound desc = could not find container \"e88123560b937517acfc67410c2c32103a584f41a90b19ca55f004e6f0d26539\": container with ID starting with e88123560b937517acfc67410c2c32103a584f41a90b19ca55f004e6f0d26539 not found: ID does not exist" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.730245 4910 scope.go:117] "RemoveContainer" containerID="b7bf73ba4221bece8cb8fc240d4ab64c3849322825320e55f7cfce713a5e0dfa" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.769598 4910 scope.go:117] "RemoveContainer" containerID="b7bf73ba4221bece8cb8fc240d4ab64c3849322825320e55f7cfce713a5e0dfa" Jan 05 21:54:24 crc kubenswrapper[4910]: E0105 21:54:24.770136 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7bf73ba4221bece8cb8fc240d4ab64c3849322825320e55f7cfce713a5e0dfa\": container with ID starting with b7bf73ba4221bece8cb8fc240d4ab64c3849322825320e55f7cfce713a5e0dfa not found: ID does not exist" containerID="b7bf73ba4221bece8cb8fc240d4ab64c3849322825320e55f7cfce713a5e0dfa" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.770173 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7bf73ba4221bece8cb8fc240d4ab64c3849322825320e55f7cfce713a5e0dfa"} err="failed to get container status \"b7bf73ba4221bece8cb8fc240d4ab64c3849322825320e55f7cfce713a5e0dfa\": rpc error: code = NotFound desc = could not find container \"b7bf73ba4221bece8cb8fc240d4ab64c3849322825320e55f7cfce713a5e0dfa\": container with ID starting with b7bf73ba4221bece8cb8fc240d4ab64c3849322825320e55f7cfce713a5e0dfa not found: ID does not exist" Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.773731 4910 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942"] Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.779643 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-798bfbcf46-jz942"] Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.782920 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-578cc95486-jckcg"] Jan 05 21:54:24 crc kubenswrapper[4910]: I0105 21:54:24.786577 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-578cc95486-jckcg"] Jan 05 21:54:25 crc kubenswrapper[4910]: I0105 21:54:25.719406 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" event={"ID":"1b7102a7-347a-4025-bc40-6c78fddf35af","Type":"ContainerStarted","Data":"d55aca133b5d9f970e190a25805974ecd2324a431f45d886466f2f62cd595f18"} Jan 05 21:54:25 crc kubenswrapper[4910]: I0105 21:54:25.719807 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:25 crc kubenswrapper[4910]: I0105 21:54:25.723481 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lxqq" event={"ID":"554221c9-a077-40a9-a756-a9589d845ef7","Type":"ContainerStarted","Data":"785f614a9a7148e5ca73e575bb3e2dd6d8639ee93f0eba0173a608f044d98c1a"} Jan 05 21:54:25 crc kubenswrapper[4910]: I0105 21:54:25.732802 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:54:25 crc kubenswrapper[4910]: I0105 21:54:25.736741 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" podStartSLOduration=6.736721364 podStartE2EDuration="6.736721364s" podCreationTimestamp="2026-01-05 21:54:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:54:25.735970091 +0000 UTC m=+197.313467771" watchObservedRunningTime="2026-01-05 21:54:25.736721364 +0000 UTC m=+197.314219054" Jan 05 21:54:25 crc kubenswrapper[4910]: I0105 21:54:25.778610 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9lxqq" podStartSLOduration=2.836561109 podStartE2EDuration="39.778585024s" podCreationTimestamp="2026-01-05 21:53:46 +0000 UTC" firstStartedPulling="2026-01-05 21:53:48.183418917 +0000 UTC m=+159.760916587" lastFinishedPulling="2026-01-05 21:54:25.125442832 +0000 UTC m=+196.702940502" observedRunningTime="2026-01-05 21:54:25.759681997 +0000 UTC m=+197.337179677" watchObservedRunningTime="2026-01-05 21:54:25.778585024 +0000 UTC m=+197.356082714" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.735577 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b60d123-f3ba-4ee7-81c1-b424bec0dc8b" path="/var/lib/kubelet/pods/4b60d123-f3ba-4ee7-81c1-b424bec0dc8b/volumes" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.737720 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0" path="/var/lib/kubelet/pods/ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0/volumes" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.913167 
4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm"] Jan 05 21:54:26 crc kubenswrapper[4910]: E0105 21:54:26.913518 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0" containerName="route-controller-manager" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.913538 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0" containerName="route-controller-manager" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.913731 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff8e1a68-ed73-4082-bdee-5bfd2ec60cf0" containerName="route-controller-manager" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.914347 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.916111 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.916492 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.919267 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.919276 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.920060 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.923422 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.926430 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm"] Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.985753 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f60515c-9ab2-40c3-a430-2ff330f483e6-serving-cert\") pod \"route-controller-manager-54c78b4894-sdlgm\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.985804 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f60515c-9ab2-40c3-a430-2ff330f483e6-client-ca\") pod \"route-controller-manager-54c78b4894-sdlgm\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.985858 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sfts\" (UniqueName: \"kubernetes.io/projected/1f60515c-9ab2-40c3-a430-2ff330f483e6-kube-api-access-7sfts\") pod 
\"route-controller-manager-54c78b4894-sdlgm\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:26 crc kubenswrapper[4910]: I0105 21:54:26.985897 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f60515c-9ab2-40c3-a430-2ff330f483e6-config\") pod \"route-controller-manager-54c78b4894-sdlgm\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.087613 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f60515c-9ab2-40c3-a430-2ff330f483e6-config\") pod \"route-controller-manager-54c78b4894-sdlgm\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.087687 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f60515c-9ab2-40c3-a430-2ff330f483e6-serving-cert\") pod \"route-controller-manager-54c78b4894-sdlgm\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.087709 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f60515c-9ab2-40c3-a430-2ff330f483e6-client-ca\") pod \"route-controller-manager-54c78b4894-sdlgm\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.087755 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sfts\" (UniqueName: \"kubernetes.io/projected/1f60515c-9ab2-40c3-a430-2ff330f483e6-kube-api-access-7sfts\") pod \"route-controller-manager-54c78b4894-sdlgm\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.088974 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f60515c-9ab2-40c3-a430-2ff330f483e6-client-ca\") pod \"route-controller-manager-54c78b4894-sdlgm\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.090270 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f60515c-9ab2-40c3-a430-2ff330f483e6-config\") pod \"route-controller-manager-54c78b4894-sdlgm\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.103850 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sfts\" (UniqueName: \"kubernetes.io/projected/1f60515c-9ab2-40c3-a430-2ff330f483e6-kube-api-access-7sfts\") pod \"route-controller-manager-54c78b4894-sdlgm\" (UID: 
\"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.104176 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f60515c-9ab2-40c3-a430-2ff330f483e6-serving-cert\") pod \"route-controller-manager-54c78b4894-sdlgm\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.181409 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9lxqq" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.181462 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9lxqq" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.235322 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.296817 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.297861 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.299523 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.300440 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.305462 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.391353 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9401ebc0-64e8-48b8-9702-4c4dc6867695-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9401ebc0-64e8-48b8-9702-4c4dc6867695\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.391757 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9401ebc0-64e8-48b8-9702-4c4dc6867695-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9401ebc0-64e8-48b8-9702-4c4dc6867695\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.493101 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9401ebc0-64e8-48b8-9702-4c4dc6867695-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9401ebc0-64e8-48b8-9702-4c4dc6867695\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.493208 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9401ebc0-64e8-48b8-9702-4c4dc6867695-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: 
\"9401ebc0-64e8-48b8-9702-4c4dc6867695\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.493317 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9401ebc0-64e8-48b8-9702-4c4dc6867695-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9401ebc0-64e8-48b8-9702-4c4dc6867695\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.520209 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9401ebc0-64e8-48b8-9702-4c4dc6867695-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9401ebc0-64e8-48b8-9702-4c4dc6867695\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.685875 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.699556 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm"] Jan 05 21:54:27 crc kubenswrapper[4910]: W0105 21:54:27.719112 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f60515c_9ab2_40c3_a430_2ff330f483e6.slice/crio-3bf94ada5b932c2aa7216de0c02754dd4c4b450bbf11a68e10dc55f0e8f502cc WatchSource:0}: Error finding container 3bf94ada5b932c2aa7216de0c02754dd4c4b450bbf11a68e10dc55f0e8f502cc: Status 404 returned error can't find the container with id 3bf94ada5b932c2aa7216de0c02754dd4c4b450bbf11a68e10dc55f0e8f502cc Jan 05 21:54:27 crc kubenswrapper[4910]: I0105 21:54:27.740531 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" event={"ID":"1f60515c-9ab2-40c3-a430-2ff330f483e6","Type":"ContainerStarted","Data":"3bf94ada5b932c2aa7216de0c02754dd4c4b450bbf11a68e10dc55f0e8f502cc"} Jan 05 21:54:28 crc kubenswrapper[4910]: I0105 21:54:28.164355 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 05 21:54:28 crc kubenswrapper[4910]: I0105 21:54:28.419342 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lxqq" podUID="554221c9-a077-40a9-a756-a9589d845ef7" containerName="registry-server" probeResult="failure" output=< Jan 05 21:54:28 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Jan 05 21:54:28 crc kubenswrapper[4910]: > Jan 05 21:54:28 crc kubenswrapper[4910]: I0105 21:54:28.756158 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" event={"ID":"1f60515c-9ab2-40c3-a430-2ff330f483e6","Type":"ContainerStarted","Data":"cc6a4f19363c913fe18c7b2e849509266544bcd242c3feb4478d7afdd0841224"} Jan 05 21:54:28 crc kubenswrapper[4910]: I0105 21:54:28.756436 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:28 crc kubenswrapper[4910]: I0105 21:54:28.762349 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" 
event={"ID":"9401ebc0-64e8-48b8-9702-4c4dc6867695","Type":"ContainerStarted","Data":"1288f630221873a227c382024f1b8ff87b53cbb183bc4db60fc673389a13369a"} Jan 05 21:54:28 crc kubenswrapper[4910]: I0105 21:54:28.762424 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:54:28 crc kubenswrapper[4910]: I0105 21:54:28.784010 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" podStartSLOduration=9.783993409 podStartE2EDuration="9.783993409s" podCreationTimestamp="2026-01-05 21:54:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:54:28.782156405 +0000 UTC m=+200.359654075" watchObservedRunningTime="2026-01-05 21:54:28.783993409 +0000 UTC m=+200.361491069" Jan 05 21:54:29 crc kubenswrapper[4910]: I0105 21:54:29.770003 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hg8l" event={"ID":"340fecda-72dc-4870-887a-29b5ef58ae94","Type":"ContainerStarted","Data":"ed035abb419cdb0e866ffb3df08920aead94f7a2e6623e780de7247aaa310095"} Jan 05 21:54:29 crc kubenswrapper[4910]: I0105 21:54:29.773617 4910 generic.go:334] "Generic (PLEG): container finished" podID="9401ebc0-64e8-48b8-9702-4c4dc6867695" containerID="c270aa4bfd9a08272b1b6902b309722b2bac1237b281c72a07e85cb2fcdc68e4" exitCode=0 Jan 05 21:54:29 crc kubenswrapper[4910]: I0105 21:54:29.773761 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"9401ebc0-64e8-48b8-9702-4c4dc6867695","Type":"ContainerDied","Data":"c270aa4bfd9a08272b1b6902b309722b2bac1237b281c72a07e85cb2fcdc68e4"} Jan 05 21:54:30 crc kubenswrapper[4910]: I0105 21:54:30.779250 4910 generic.go:334] "Generic (PLEG): container finished" podID="340fecda-72dc-4870-887a-29b5ef58ae94" containerID="ed035abb419cdb0e866ffb3df08920aead94f7a2e6623e780de7247aaa310095" exitCode=0 Jan 05 21:54:30 crc kubenswrapper[4910]: I0105 21:54:30.780311 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hg8l" event={"ID":"340fecda-72dc-4870-887a-29b5ef58ae94","Type":"ContainerDied","Data":"ed035abb419cdb0e866ffb3df08920aead94f7a2e6623e780de7247aaa310095"} Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.127213 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.255853 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9401ebc0-64e8-48b8-9702-4c4dc6867695-kubelet-dir\") pod \"9401ebc0-64e8-48b8-9702-4c4dc6867695\" (UID: \"9401ebc0-64e8-48b8-9702-4c4dc6867695\") " Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.256231 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9401ebc0-64e8-48b8-9702-4c4dc6867695-kube-api-access\") pod \"9401ebc0-64e8-48b8-9702-4c4dc6867695\" (UID: \"9401ebc0-64e8-48b8-9702-4c4dc6867695\") " Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.255982 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9401ebc0-64e8-48b8-9702-4c4dc6867695-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "9401ebc0-64e8-48b8-9702-4c4dc6867695" (UID: "9401ebc0-64e8-48b8-9702-4c4dc6867695"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.256626 4910 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9401ebc0-64e8-48b8-9702-4c4dc6867695-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.262378 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9401ebc0-64e8-48b8-9702-4c4dc6867695-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "9401ebc0-64e8-48b8-9702-4c4dc6867695" (UID: "9401ebc0-64e8-48b8-9702-4c4dc6867695"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.357795 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9401ebc0-64e8-48b8-9702-4c4dc6867695-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.788386 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hg8l" event={"ID":"340fecda-72dc-4870-887a-29b5ef58ae94","Type":"ContainerStarted","Data":"bd4e3409bf7d1d3b572fd967c5225bc3dee6e4b1ee3a4eda613979d62a9647ad"} Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.790854 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"9401ebc0-64e8-48b8-9702-4c4dc6867695","Type":"ContainerDied","Data":"1288f630221873a227c382024f1b8ff87b53cbb183bc4db60fc673389a13369a"} Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.790897 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1288f630221873a227c382024f1b8ff87b53cbb183bc4db60fc673389a13369a" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.790906 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.816180 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2hg8l" podStartSLOduration=2.547117518 podStartE2EDuration="48.816163385s" podCreationTimestamp="2026-01-05 21:53:43 +0000 UTC" firstStartedPulling="2026-01-05 21:53:45.015044653 +0000 UTC m=+156.592542323" lastFinishedPulling="2026-01-05 21:54:31.28409052 +0000 UTC m=+202.861588190" observedRunningTime="2026-01-05 21:54:31.812396906 +0000 UTC m=+203.389894576" watchObservedRunningTime="2026-01-05 21:54:31.816163385 +0000 UTC m=+203.393661055" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.892061 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 05 21:54:31 crc kubenswrapper[4910]: E0105 21:54:31.892524 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9401ebc0-64e8-48b8-9702-4c4dc6867695" containerName="pruner" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.892566 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9401ebc0-64e8-48b8-9702-4c4dc6867695" containerName="pruner" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.893006 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="9401ebc0-64e8-48b8-9702-4c4dc6867695" containerName="pruner" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.893911 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.896298 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.896608 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 05 21:54:31 crc kubenswrapper[4910]: I0105 21:54:31.900365 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 05 21:54:32 crc kubenswrapper[4910]: I0105 21:54:32.072224 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ff686cfa-03a7-4c78-8efc-17407e5e79c0-kube-api-access\") pod \"installer-9-crc\" (UID: \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 05 21:54:32 crc kubenswrapper[4910]: I0105 21:54:32.072747 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ff686cfa-03a7-4c78-8efc-17407e5e79c0-var-lock\") pod \"installer-9-crc\" (UID: \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 05 21:54:32 crc kubenswrapper[4910]: I0105 21:54:32.073035 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ff686cfa-03a7-4c78-8efc-17407e5e79c0-kubelet-dir\") pod \"installer-9-crc\" (UID: \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 05 21:54:32 crc kubenswrapper[4910]: I0105 21:54:32.173961 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: 
\"kubernetes.io/host-path/ff686cfa-03a7-4c78-8efc-17407e5e79c0-var-lock\") pod \"installer-9-crc\" (UID: \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 05 21:54:32 crc kubenswrapper[4910]: I0105 21:54:32.174072 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ff686cfa-03a7-4c78-8efc-17407e5e79c0-kubelet-dir\") pod \"installer-9-crc\" (UID: \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 05 21:54:32 crc kubenswrapper[4910]: I0105 21:54:32.174138 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ff686cfa-03a7-4c78-8efc-17407e5e79c0-kube-api-access\") pod \"installer-9-crc\" (UID: \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 05 21:54:32 crc kubenswrapper[4910]: I0105 21:54:32.174338 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ff686cfa-03a7-4c78-8efc-17407e5e79c0-var-lock\") pod \"installer-9-crc\" (UID: \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 05 21:54:32 crc kubenswrapper[4910]: I0105 21:54:32.174385 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ff686cfa-03a7-4c78-8efc-17407e5e79c0-kubelet-dir\") pod \"installer-9-crc\" (UID: \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 05 21:54:32 crc kubenswrapper[4910]: I0105 21:54:32.192770 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ff686cfa-03a7-4c78-8efc-17407e5e79c0-kube-api-access\") pod \"installer-9-crc\" (UID: \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 05 21:54:32 crc kubenswrapper[4910]: I0105 21:54:32.257641 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 05 21:54:32 crc kubenswrapper[4910]: I0105 21:54:32.673205 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 05 21:54:32 crc kubenswrapper[4910]: I0105 21:54:32.803748 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"ff686cfa-03a7-4c78-8efc-17407e5e79c0","Type":"ContainerStarted","Data":"08afc29a04e5e60e52bcefb1b7ba6414540dbfb0c940c2a225843b215d775751"} Jan 05 21:54:33 crc kubenswrapper[4910]: I0105 21:54:33.601790 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2hg8l" Jan 05 21:54:33 crc kubenswrapper[4910]: I0105 21:54:33.601849 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2hg8l" Jan 05 21:54:33 crc kubenswrapper[4910]: I0105 21:54:33.698018 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2hg8l" Jan 05 21:54:33 crc kubenswrapper[4910]: I0105 21:54:33.811550 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"ff686cfa-03a7-4c78-8efc-17407e5e79c0","Type":"ContainerStarted","Data":"835e14a77c1b3239d8a5aed34d5d0e9b630634019d8fd581cd9a47169102c4ac"} Jan 05 21:54:34 crc kubenswrapper[4910]: I0105 21:54:34.837588 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=3.837564614 podStartE2EDuration="3.837564614s" podCreationTimestamp="2026-01-05 21:54:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:54:34.833658592 +0000 UTC m=+206.411156262" watchObservedRunningTime="2026-01-05 21:54:34.837564614 +0000 UTC m=+206.415062314" Jan 05 21:54:35 crc kubenswrapper[4910]: I0105 21:54:35.771001 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gqzj7"] Jan 05 21:54:37 crc kubenswrapper[4910]: I0105 21:54:37.224547 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9lxqq" Jan 05 21:54:37 crc kubenswrapper[4910]: I0105 21:54:37.267529 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9lxqq" Jan 05 21:54:37 crc kubenswrapper[4910]: I0105 21:54:37.836873 4910 generic.go:334] "Generic (PLEG): container finished" podID="aef8fca5-e47a-4942-8f59-42731aa77419" containerID="b301d209896516545bf26ca508f258f10e9de8babf98a4c287e44540acdc48c0" exitCode=0 Jan 05 21:54:37 crc kubenswrapper[4910]: I0105 21:54:37.837008 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bz5df" event={"ID":"aef8fca5-e47a-4942-8f59-42731aa77419","Type":"ContainerDied","Data":"b301d209896516545bf26ca508f258f10e9de8babf98a4c287e44540acdc48c0"} Jan 05 21:54:37 crc kubenswrapper[4910]: I0105 21:54:37.838970 4910 generic.go:334] "Generic (PLEG): container finished" podID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" containerID="0c6f13f5faf6bb08ff6e4185f01fa7d9284a77f54b5141d125c8427fdf0a2a7d" exitCode=0 Jan 05 21:54:37 crc kubenswrapper[4910]: I0105 21:54:37.839039 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-gnvct" event={"ID":"93cdd0f0-6faf-4d13-b090-21afa1ae8f76","Type":"ContainerDied","Data":"0c6f13f5faf6bb08ff6e4185f01fa7d9284a77f54b5141d125c8427fdf0a2a7d"} Jan 05 21:54:37 crc kubenswrapper[4910]: I0105 21:54:37.843833 4910 generic.go:334] "Generic (PLEG): container finished" podID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" containerID="67a9ef8cc27153979357e9a345b762db7cbc41e4351199d0570991f38188d8fa" exitCode=0 Jan 05 21:54:37 crc kubenswrapper[4910]: I0105 21:54:37.848220 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzbmf" event={"ID":"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e","Type":"ContainerDied","Data":"67a9ef8cc27153979357e9a345b762db7cbc41e4351199d0570991f38188d8fa"} Jan 05 21:54:37 crc kubenswrapper[4910]: I0105 21:54:37.991600 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9lxqq"] Jan 05 21:54:38 crc kubenswrapper[4910]: I0105 21:54:38.852965 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gnvct" event={"ID":"93cdd0f0-6faf-4d13-b090-21afa1ae8f76","Type":"ContainerStarted","Data":"e75e555cc7723a320f237ca191471bf69de21971d8af85b58e3dc78c01bcd102"} Jan 05 21:54:38 crc kubenswrapper[4910]: I0105 21:54:38.856389 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzbmf" event={"ID":"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e","Type":"ContainerStarted","Data":"32c0bb0052253f8dd2c344748d2eb39984ef98f99f8d9815e4bcd9f36d764353"} Jan 05 21:54:38 crc kubenswrapper[4910]: I0105 21:54:38.859078 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bz5df" event={"ID":"aef8fca5-e47a-4942-8f59-42731aa77419","Type":"ContainerStarted","Data":"f7f2de2a34f2657243d9ff5da841291fadb39a3617ad4ea21647ad42f1cd938d"} Jan 05 21:54:38 crc kubenswrapper[4910]: I0105 21:54:38.859230 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9lxqq" podUID="554221c9-a077-40a9-a756-a9589d845ef7" containerName="registry-server" containerID="cri-o://785f614a9a7148e5ca73e575bb3e2dd6d8639ee93f0eba0173a608f044d98c1a" gracePeriod=2 Jan 05 21:54:38 crc kubenswrapper[4910]: I0105 21:54:38.872960 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gnvct" podStartSLOduration=2.574917567 podStartE2EDuration="55.872931282s" podCreationTimestamp="2026-01-05 21:53:43 +0000 UTC" firstStartedPulling="2026-01-05 21:53:45.021306904 +0000 UTC m=+156.598804574" lastFinishedPulling="2026-01-05 21:54:38.319320619 +0000 UTC m=+209.896818289" observedRunningTime="2026-01-05 21:54:38.871252861 +0000 UTC m=+210.448750531" watchObservedRunningTime="2026-01-05 21:54:38.872931282 +0000 UTC m=+210.450428962" Jan 05 21:54:38 crc kubenswrapper[4910]: I0105 21:54:38.891585 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pzbmf" podStartSLOduration=2.465432613 podStartE2EDuration="53.891562284s" podCreationTimestamp="2026-01-05 21:53:45 +0000 UTC" firstStartedPulling="2026-01-05 21:53:47.085271581 +0000 UTC m=+158.662769251" lastFinishedPulling="2026-01-05 21:54:38.511401252 +0000 UTC m=+210.088898922" observedRunningTime="2026-01-05 21:54:38.890194299 +0000 UTC m=+210.467691969" watchObservedRunningTime="2026-01-05 21:54:38.891562284 +0000 UTC m=+210.469059964" Jan 
05 21:54:38 crc kubenswrapper[4910]: I0105 21:54:38.911330 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bz5df" podStartSLOduration=2.802108159 podStartE2EDuration="53.911307327s" podCreationTimestamp="2026-01-05 21:53:45 +0000 UTC" firstStartedPulling="2026-01-05 21:53:47.152629164 +0000 UTC m=+158.730126834" lastFinishedPulling="2026-01-05 21:54:38.261828322 +0000 UTC m=+209.839326002" observedRunningTime="2026-01-05 21:54:38.908799641 +0000 UTC m=+210.486297311" watchObservedRunningTime="2026-01-05 21:54:38.911307327 +0000 UTC m=+210.488804997" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.415109 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9lxqq" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.522212 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/554221c9-a077-40a9-a756-a9589d845ef7-utilities\") pod \"554221c9-a077-40a9-a756-a9589d845ef7\" (UID: \"554221c9-a077-40a9-a756-a9589d845ef7\") " Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.522280 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-db6wn\" (UniqueName: \"kubernetes.io/projected/554221c9-a077-40a9-a756-a9589d845ef7-kube-api-access-db6wn\") pod \"554221c9-a077-40a9-a756-a9589d845ef7\" (UID: \"554221c9-a077-40a9-a756-a9589d845ef7\") " Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.522367 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/554221c9-a077-40a9-a756-a9589d845ef7-catalog-content\") pod \"554221c9-a077-40a9-a756-a9589d845ef7\" (UID: \"554221c9-a077-40a9-a756-a9589d845ef7\") " Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.523631 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/554221c9-a077-40a9-a756-a9589d845ef7-utilities" (OuterVolumeSpecName: "utilities") pod "554221c9-a077-40a9-a756-a9589d845ef7" (UID: "554221c9-a077-40a9-a756-a9589d845ef7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.534402 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/554221c9-a077-40a9-a756-a9589d845ef7-kube-api-access-db6wn" (OuterVolumeSpecName: "kube-api-access-db6wn") pod "554221c9-a077-40a9-a756-a9589d845ef7" (UID: "554221c9-a077-40a9-a756-a9589d845ef7"). InnerVolumeSpecName "kube-api-access-db6wn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.624104 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-db6wn\" (UniqueName: \"kubernetes.io/projected/554221c9-a077-40a9-a756-a9589d845ef7-kube-api-access-db6wn\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.624180 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/554221c9-a077-40a9-a756-a9589d845ef7-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.651598 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/554221c9-a077-40a9-a756-a9589d845ef7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "554221c9-a077-40a9-a756-a9589d845ef7" (UID: "554221c9-a077-40a9-a756-a9589d845ef7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.725558 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/554221c9-a077-40a9-a756-a9589d845ef7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.866362 4910 generic.go:334] "Generic (PLEG): container finished" podID="e6181ab2-b292-4e7d-b30e-ec724946700c" containerID="7cc50a10a89158df13e3fc348e47890eca16367902d61414668e20c9165af525" exitCode=0 Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.866504 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wbhgr" event={"ID":"e6181ab2-b292-4e7d-b30e-ec724946700c","Type":"ContainerDied","Data":"7cc50a10a89158df13e3fc348e47890eca16367902d61414668e20c9165af525"} Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.873545 4910 generic.go:334] "Generic (PLEG): container finished" podID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" containerID="236b3a0b6b653cbbb841b3d2bc4a4b5fa288b2e3ed97efa1ce46c334cf175393" exitCode=0 Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.873678 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tvk2" event={"ID":"e67293c9-fc75-468d-b1c5-c09f9ad46dda","Type":"ContainerDied","Data":"236b3a0b6b653cbbb841b3d2bc4a4b5fa288b2e3ed97efa1ce46c334cf175393"} Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.880026 4910 generic.go:334] "Generic (PLEG): container finished" podID="554221c9-a077-40a9-a756-a9589d845ef7" containerID="785f614a9a7148e5ca73e575bb3e2dd6d8639ee93f0eba0173a608f044d98c1a" exitCode=0 Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.880195 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lxqq" event={"ID":"554221c9-a077-40a9-a756-a9589d845ef7","Type":"ContainerDied","Data":"785f614a9a7148e5ca73e575bb3e2dd6d8639ee93f0eba0173a608f044d98c1a"} Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.880351 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9lxqq" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.880459 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lxqq" event={"ID":"554221c9-a077-40a9-a756-a9589d845ef7","Type":"ContainerDied","Data":"f076cdf5f22138c572d4d49f6ff430a935801ae00968f42d470a34972a2a01a1"} Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.880539 4910 scope.go:117] "RemoveContainer" containerID="785f614a9a7148e5ca73e575bb3e2dd6d8639ee93f0eba0173a608f044d98c1a" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.923633 4910 scope.go:117] "RemoveContainer" containerID="dbcfb43121fcf101d940c2147031039f48304a43c8c9b68f2a9f048e65875d1c" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.937424 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9lxqq"] Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.940722 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9lxqq"] Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.948365 4910 scope.go:117] "RemoveContainer" containerID="f5e807c853de6fd82b6a82099000901d1e1e169f7b09244bae2a6d5839bfcf53" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.973684 4910 scope.go:117] "RemoveContainer" containerID="785f614a9a7148e5ca73e575bb3e2dd6d8639ee93f0eba0173a608f044d98c1a" Jan 05 21:54:39 crc kubenswrapper[4910]: E0105 21:54:39.975901 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"785f614a9a7148e5ca73e575bb3e2dd6d8639ee93f0eba0173a608f044d98c1a\": container with ID starting with 785f614a9a7148e5ca73e575bb3e2dd6d8639ee93f0eba0173a608f044d98c1a not found: ID does not exist" containerID="785f614a9a7148e5ca73e575bb3e2dd6d8639ee93f0eba0173a608f044d98c1a" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.975938 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"785f614a9a7148e5ca73e575bb3e2dd6d8639ee93f0eba0173a608f044d98c1a"} err="failed to get container status \"785f614a9a7148e5ca73e575bb3e2dd6d8639ee93f0eba0173a608f044d98c1a\": rpc error: code = NotFound desc = could not find container \"785f614a9a7148e5ca73e575bb3e2dd6d8639ee93f0eba0173a608f044d98c1a\": container with ID starting with 785f614a9a7148e5ca73e575bb3e2dd6d8639ee93f0eba0173a608f044d98c1a not found: ID does not exist" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.975969 4910 scope.go:117] "RemoveContainer" containerID="dbcfb43121fcf101d940c2147031039f48304a43c8c9b68f2a9f048e65875d1c" Jan 05 21:54:39 crc kubenswrapper[4910]: E0105 21:54:39.977437 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbcfb43121fcf101d940c2147031039f48304a43c8c9b68f2a9f048e65875d1c\": container with ID starting with dbcfb43121fcf101d940c2147031039f48304a43c8c9b68f2a9f048e65875d1c not found: ID does not exist" containerID="dbcfb43121fcf101d940c2147031039f48304a43c8c9b68f2a9f048e65875d1c" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.977503 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbcfb43121fcf101d940c2147031039f48304a43c8c9b68f2a9f048e65875d1c"} err="failed to get container status \"dbcfb43121fcf101d940c2147031039f48304a43c8c9b68f2a9f048e65875d1c\": rpc error: code = NotFound desc = could not find container 
\"dbcfb43121fcf101d940c2147031039f48304a43c8c9b68f2a9f048e65875d1c\": container with ID starting with dbcfb43121fcf101d940c2147031039f48304a43c8c9b68f2a9f048e65875d1c not found: ID does not exist" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.977600 4910 scope.go:117] "RemoveContainer" containerID="f5e807c853de6fd82b6a82099000901d1e1e169f7b09244bae2a6d5839bfcf53" Jan 05 21:54:39 crc kubenswrapper[4910]: E0105 21:54:39.980887 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5e807c853de6fd82b6a82099000901d1e1e169f7b09244bae2a6d5839bfcf53\": container with ID starting with f5e807c853de6fd82b6a82099000901d1e1e169f7b09244bae2a6d5839bfcf53 not found: ID does not exist" containerID="f5e807c853de6fd82b6a82099000901d1e1e169f7b09244bae2a6d5839bfcf53" Jan 05 21:54:39 crc kubenswrapper[4910]: I0105 21:54:39.980977 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5e807c853de6fd82b6a82099000901d1e1e169f7b09244bae2a6d5839bfcf53"} err="failed to get container status \"f5e807c853de6fd82b6a82099000901d1e1e169f7b09244bae2a6d5839bfcf53\": rpc error: code = NotFound desc = could not find container \"f5e807c853de6fd82b6a82099000901d1e1e169f7b09244bae2a6d5839bfcf53\": container with ID starting with f5e807c853de6fd82b6a82099000901d1e1e169f7b09244bae2a6d5839bfcf53 not found: ID does not exist" Jan 05 21:54:40 crc kubenswrapper[4910]: I0105 21:54:40.729156 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="554221c9-a077-40a9-a756-a9589d845ef7" path="/var/lib/kubelet/pods/554221c9-a077-40a9-a756-a9589d845ef7/volumes" Jan 05 21:54:40 crc kubenswrapper[4910]: I0105 21:54:40.887723 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wbhgr" event={"ID":"e6181ab2-b292-4e7d-b30e-ec724946700c","Type":"ContainerStarted","Data":"e4e27be6d7192d5b7147144ed0072b6b457e1741a44feaa7593e25529a2e61f7"} Jan 05 21:54:40 crc kubenswrapper[4910]: I0105 21:54:40.890219 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tvk2" event={"ID":"e67293c9-fc75-468d-b1c5-c09f9ad46dda","Type":"ContainerStarted","Data":"1de8eacb26cef6b46de2bc9cf5e247e88efa6be2b0bce9cd969f18aa75dd4c17"} Jan 05 21:54:40 crc kubenswrapper[4910]: I0105 21:54:40.892730 4910 generic.go:334] "Generic (PLEG): container finished" podID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" containerID="ab50328fcc6803e3c5b0e3be08e19603836f1c6c27e45818153c2d050ce2fbbc" exitCode=0 Jan 05 21:54:40 crc kubenswrapper[4910]: I0105 21:54:40.892776 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ghpct" event={"ID":"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4","Type":"ContainerDied","Data":"ab50328fcc6803e3c5b0e3be08e19603836f1c6c27e45818153c2d050ce2fbbc"} Jan 05 21:54:40 crc kubenswrapper[4910]: I0105 21:54:40.903735 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wbhgr" podStartSLOduration=2.6395134110000003 podStartE2EDuration="57.903712281s" podCreationTimestamp="2026-01-05 21:53:43 +0000 UTC" firstStartedPulling="2026-01-05 21:53:45.018616792 +0000 UTC m=+156.596114462" lastFinishedPulling="2026-01-05 21:54:40.282815662 +0000 UTC m=+211.860313332" observedRunningTime="2026-01-05 21:54:40.902933456 +0000 UTC m=+212.480431126" watchObservedRunningTime="2026-01-05 21:54:40.903712281 +0000 UTC m=+212.481209961" Jan 05 
21:54:40 crc kubenswrapper[4910]: I0105 21:54:40.929455 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7tvk2" podStartSLOduration=2.588877261 podStartE2EDuration="57.929429534s" podCreationTimestamp="2026-01-05 21:53:43 +0000 UTC" firstStartedPulling="2026-01-05 21:53:45.008071819 +0000 UTC m=+156.585569489" lastFinishedPulling="2026-01-05 21:54:40.348624092 +0000 UTC m=+211.926121762" observedRunningTime="2026-01-05 21:54:40.922519486 +0000 UTC m=+212.500017156" watchObservedRunningTime="2026-01-05 21:54:40.929429534 +0000 UTC m=+212.506927264" Jan 05 21:54:40 crc kubenswrapper[4910]: I0105 21:54:40.952700 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 21:54:40 crc kubenswrapper[4910]: I0105 21:54:40.952770 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 21:54:40 crc kubenswrapper[4910]: I0105 21:54:40.952828 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:54:40 crc kubenswrapper[4910]: I0105 21:54:40.953618 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 21:54:40 crc kubenswrapper[4910]: I0105 21:54:40.953691 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb" gracePeriod=600 Jan 05 21:54:41 crc kubenswrapper[4910]: I0105 21:54:41.901315 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb" exitCode=0 Jan 05 21:54:41 crc kubenswrapper[4910]: I0105 21:54:41.901394 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb"} Jan 05 21:54:41 crc kubenswrapper[4910]: I0105 21:54:41.902221 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"15612c8cccfa06b0cc74957c3ccd1b20e53a5417a6eefdbf59c2e8cdfb185ad1"} Jan 05 21:54:42 crc kubenswrapper[4910]: I0105 21:54:42.909051 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ghpct" 
event={"ID":"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4","Type":"ContainerStarted","Data":"29971467a3dcb3c04d6a710368043b3869f40ed8b96ccb86c936ddf566fc632c"} Jan 05 21:54:43 crc kubenswrapper[4910]: I0105 21:54:43.642178 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2hg8l" Jan 05 21:54:43 crc kubenswrapper[4910]: I0105 21:54:43.657828 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ghpct" podStartSLOduration=4.143275357 podStartE2EDuration="57.657808604s" podCreationTimestamp="2026-01-05 21:53:46 +0000 UTC" firstStartedPulling="2026-01-05 21:53:48.183762687 +0000 UTC m=+159.761260347" lastFinishedPulling="2026-01-05 21:54:41.698295934 +0000 UTC m=+213.275793594" observedRunningTime="2026-01-05 21:54:42.928572452 +0000 UTC m=+214.506070132" watchObservedRunningTime="2026-01-05 21:54:43.657808604 +0000 UTC m=+215.235306274" Jan 05 21:54:43 crc kubenswrapper[4910]: I0105 21:54:43.788283 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7tvk2" Jan 05 21:54:43 crc kubenswrapper[4910]: I0105 21:54:43.788354 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7tvk2" Jan 05 21:54:43 crc kubenswrapper[4910]: I0105 21:54:43.832182 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7tvk2" Jan 05 21:54:44 crc kubenswrapper[4910]: I0105 21:54:44.031337 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wbhgr" Jan 05 21:54:44 crc kubenswrapper[4910]: I0105 21:54:44.031387 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wbhgr" Jan 05 21:54:44 crc kubenswrapper[4910]: I0105 21:54:44.074812 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wbhgr" Jan 05 21:54:44 crc kubenswrapper[4910]: I0105 21:54:44.189065 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gnvct" Jan 05 21:54:44 crc kubenswrapper[4910]: I0105 21:54:44.189489 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gnvct" Jan 05 21:54:44 crc kubenswrapper[4910]: I0105 21:54:44.227485 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gnvct" Jan 05 21:54:44 crc kubenswrapper[4910]: I0105 21:54:44.960927 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gnvct" Jan 05 21:54:45 crc kubenswrapper[4910]: I0105 21:54:45.843831 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pzbmf" Jan 05 21:54:45 crc kubenswrapper[4910]: I0105 21:54:45.844817 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pzbmf" Jan 05 21:54:45 crc kubenswrapper[4910]: I0105 21:54:45.880582 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pzbmf" Jan 05 21:54:45 crc kubenswrapper[4910]: I0105 21:54:45.971667 4910 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pzbmf" Jan 05 21:54:46 crc kubenswrapper[4910]: I0105 21:54:46.196470 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bz5df" Jan 05 21:54:46 crc kubenswrapper[4910]: I0105 21:54:46.197553 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bz5df" Jan 05 21:54:46 crc kubenswrapper[4910]: I0105 21:54:46.241569 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bz5df" Jan 05 21:54:46 crc kubenswrapper[4910]: I0105 21:54:46.855834 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ghpct" Jan 05 21:54:46 crc kubenswrapper[4910]: I0105 21:54:46.855887 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ghpct" Jan 05 21:54:46 crc kubenswrapper[4910]: I0105 21:54:46.977698 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bz5df" Jan 05 21:54:47 crc kubenswrapper[4910]: I0105 21:54:47.900855 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ghpct" podUID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" containerName="registry-server" probeResult="failure" output=< Jan 05 21:54:47 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Jan 05 21:54:47 crc kubenswrapper[4910]: > Jan 05 21:54:48 crc kubenswrapper[4910]: I0105 21:54:48.196753 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gnvct"] Jan 05 21:54:48 crc kubenswrapper[4910]: I0105 21:54:48.197261 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gnvct" podUID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" containerName="registry-server" containerID="cri-o://e75e555cc7723a320f237ca191471bf69de21971d8af85b58e3dc78c01bcd102" gracePeriod=2 Jan 05 21:54:48 crc kubenswrapper[4910]: I0105 21:54:48.393887 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bz5df"] Jan 05 21:54:48 crc kubenswrapper[4910]: I0105 21:54:48.952410 4910 generic.go:334] "Generic (PLEG): container finished" podID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" containerID="e75e555cc7723a320f237ca191471bf69de21971d8af85b58e3dc78c01bcd102" exitCode=0 Jan 05 21:54:48 crc kubenswrapper[4910]: I0105 21:54:48.953492 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gnvct" event={"ID":"93cdd0f0-6faf-4d13-b090-21afa1ae8f76","Type":"ContainerDied","Data":"e75e555cc7723a320f237ca191471bf69de21971d8af85b58e3dc78c01bcd102"} Jan 05 21:54:49 crc kubenswrapper[4910]: I0105 21:54:49.843328 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gnvct" Jan 05 21:54:49 crc kubenswrapper[4910]: I0105 21:54:49.959977 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gnvct" Jan 05 21:54:49 crc kubenswrapper[4910]: I0105 21:54:49.959961 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gnvct" event={"ID":"93cdd0f0-6faf-4d13-b090-21afa1ae8f76","Type":"ContainerDied","Data":"1ecd8d2cc3bdb5f5163b1a0b90f5d03d7e66b1f79cc4079b0f293173ef6b1a2b"} Jan 05 21:54:49 crc kubenswrapper[4910]: I0105 21:54:49.960830 4910 scope.go:117] "RemoveContainer" containerID="e75e555cc7723a320f237ca191471bf69de21971d8af85b58e3dc78c01bcd102" Jan 05 21:54:49 crc kubenswrapper[4910]: I0105 21:54:49.960108 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bz5df" podUID="aef8fca5-e47a-4942-8f59-42731aa77419" containerName="registry-server" containerID="cri-o://f7f2de2a34f2657243d9ff5da841291fadb39a3617ad4ea21647ad42f1cd938d" gracePeriod=2 Jan 05 21:54:49 crc kubenswrapper[4910]: I0105 21:54:49.986037 4910 scope.go:117] "RemoveContainer" containerID="0c6f13f5faf6bb08ff6e4185f01fa7d9284a77f54b5141d125c8427fdf0a2a7d" Jan 05 21:54:49 crc kubenswrapper[4910]: I0105 21:54:49.993579 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-catalog-content\") pod \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\" (UID: \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\") " Jan 05 21:54:49 crc kubenswrapper[4910]: I0105 21:54:49.993617 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-utilities\") pod \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\" (UID: \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\") " Jan 05 21:54:49 crc kubenswrapper[4910]: I0105 21:54:49.993736 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qfsm\" (UniqueName: \"kubernetes.io/projected/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-kube-api-access-4qfsm\") pod \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\" (UID: \"93cdd0f0-6faf-4d13-b090-21afa1ae8f76\") " Jan 05 21:54:49 crc kubenswrapper[4910]: I0105 21:54:49.995491 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-utilities" (OuterVolumeSpecName: "utilities") pod "93cdd0f0-6faf-4d13-b090-21afa1ae8f76" (UID: "93cdd0f0-6faf-4d13-b090-21afa1ae8f76"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:54:50 crc kubenswrapper[4910]: I0105 21:54:50.002452 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-kube-api-access-4qfsm" (OuterVolumeSpecName: "kube-api-access-4qfsm") pod "93cdd0f0-6faf-4d13-b090-21afa1ae8f76" (UID: "93cdd0f0-6faf-4d13-b090-21afa1ae8f76"). InnerVolumeSpecName "kube-api-access-4qfsm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:54:50 crc kubenswrapper[4910]: I0105 21:54:50.045906 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93cdd0f0-6faf-4d13-b090-21afa1ae8f76" (UID: "93cdd0f0-6faf-4d13-b090-21afa1ae8f76"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:54:50 crc kubenswrapper[4910]: I0105 21:54:50.092360 4910 scope.go:117] "RemoveContainer" containerID="0fc196719dd50a4e78c57b2b67a47231aef6776056e67108568ddc5972cea35a" Jan 05 21:54:50 crc kubenswrapper[4910]: I0105 21:54:50.095864 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:50 crc kubenswrapper[4910]: I0105 21:54:50.095898 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qfsm\" (UniqueName: \"kubernetes.io/projected/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-kube-api-access-4qfsm\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:50 crc kubenswrapper[4910]: I0105 21:54:50.095944 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93cdd0f0-6faf-4d13-b090-21afa1ae8f76-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:50 crc kubenswrapper[4910]: I0105 21:54:50.291760 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gnvct"] Jan 05 21:54:50 crc kubenswrapper[4910]: I0105 21:54:50.295237 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gnvct"] Jan 05 21:54:50 crc kubenswrapper[4910]: I0105 21:54:50.732901 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" path="/var/lib/kubelet/pods/93cdd0f0-6faf-4d13-b090-21afa1ae8f76/volumes" Jan 05 21:54:50 crc kubenswrapper[4910]: I0105 21:54:50.967692 4910 generic.go:334] "Generic (PLEG): container finished" podID="aef8fca5-e47a-4942-8f59-42731aa77419" containerID="f7f2de2a34f2657243d9ff5da841291fadb39a3617ad4ea21647ad42f1cd938d" exitCode=0 Jan 05 21:54:50 crc kubenswrapper[4910]: I0105 21:54:50.967805 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bz5df" event={"ID":"aef8fca5-e47a-4942-8f59-42731aa77419","Type":"ContainerDied","Data":"f7f2de2a34f2657243d9ff5da841291fadb39a3617ad4ea21647ad42f1cd938d"} Jan 05 21:54:51 crc kubenswrapper[4910]: I0105 21:54:51.737727 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bz5df" Jan 05 21:54:51 crc kubenswrapper[4910]: I0105 21:54:51.926728 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aef8fca5-e47a-4942-8f59-42731aa77419-utilities\") pod \"aef8fca5-e47a-4942-8f59-42731aa77419\" (UID: \"aef8fca5-e47a-4942-8f59-42731aa77419\") " Jan 05 21:54:51 crc kubenswrapper[4910]: I0105 21:54:51.926799 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aef8fca5-e47a-4942-8f59-42731aa77419-catalog-content\") pod \"aef8fca5-e47a-4942-8f59-42731aa77419\" (UID: \"aef8fca5-e47a-4942-8f59-42731aa77419\") " Jan 05 21:54:51 crc kubenswrapper[4910]: I0105 21:54:51.926875 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gqh6\" (UniqueName: \"kubernetes.io/projected/aef8fca5-e47a-4942-8f59-42731aa77419-kube-api-access-8gqh6\") pod \"aef8fca5-e47a-4942-8f59-42731aa77419\" (UID: \"aef8fca5-e47a-4942-8f59-42731aa77419\") " Jan 05 21:54:51 crc kubenswrapper[4910]: I0105 21:54:51.930340 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aef8fca5-e47a-4942-8f59-42731aa77419-kube-api-access-8gqh6" (OuterVolumeSpecName: "kube-api-access-8gqh6") pod "aef8fca5-e47a-4942-8f59-42731aa77419" (UID: "aef8fca5-e47a-4942-8f59-42731aa77419"). InnerVolumeSpecName "kube-api-access-8gqh6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:54:51 crc kubenswrapper[4910]: I0105 21:54:51.933270 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aef8fca5-e47a-4942-8f59-42731aa77419-utilities" (OuterVolumeSpecName: "utilities") pod "aef8fca5-e47a-4942-8f59-42731aa77419" (UID: "aef8fca5-e47a-4942-8f59-42731aa77419"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:54:51 crc kubenswrapper[4910]: I0105 21:54:51.949524 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aef8fca5-e47a-4942-8f59-42731aa77419-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aef8fca5-e47a-4942-8f59-42731aa77419" (UID: "aef8fca5-e47a-4942-8f59-42731aa77419"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:54:51 crc kubenswrapper[4910]: I0105 21:54:51.976874 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bz5df" event={"ID":"aef8fca5-e47a-4942-8f59-42731aa77419","Type":"ContainerDied","Data":"a86b1a05555c7de4cc3d6cf483140513db4ef46822b02d48e0b9a8cd7525588f"} Jan 05 21:54:51 crc kubenswrapper[4910]: I0105 21:54:51.976925 4910 scope.go:117] "RemoveContainer" containerID="f7f2de2a34f2657243d9ff5da841291fadb39a3617ad4ea21647ad42f1cd938d" Jan 05 21:54:51 crc kubenswrapper[4910]: I0105 21:54:51.976926 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bz5df" Jan 05 21:54:51 crc kubenswrapper[4910]: I0105 21:54:51.994688 4910 scope.go:117] "RemoveContainer" containerID="b301d209896516545bf26ca508f258f10e9de8babf98a4c287e44540acdc48c0" Jan 05 21:54:52 crc kubenswrapper[4910]: I0105 21:54:52.002784 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bz5df"] Jan 05 21:54:52 crc kubenswrapper[4910]: I0105 21:54:52.005728 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bz5df"] Jan 05 21:54:52 crc kubenswrapper[4910]: I0105 21:54:52.011349 4910 scope.go:117] "RemoveContainer" containerID="1ad84bad5c219f9535fd051f0a04f2f99c192ffe95b26f01de4a34eac5bf1fc3" Jan 05 21:54:52 crc kubenswrapper[4910]: I0105 21:54:52.029051 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gqh6\" (UniqueName: \"kubernetes.io/projected/aef8fca5-e47a-4942-8f59-42731aa77419-kube-api-access-8gqh6\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:52 crc kubenswrapper[4910]: I0105 21:54:52.029076 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aef8fca5-e47a-4942-8f59-42731aa77419-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:52 crc kubenswrapper[4910]: I0105 21:54:52.029086 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aef8fca5-e47a-4942-8f59-42731aa77419-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:52 crc kubenswrapper[4910]: I0105 21:54:52.732606 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aef8fca5-e47a-4942-8f59-42731aa77419" path="/var/lib/kubelet/pods/aef8fca5-e47a-4942-8f59-42731aa77419/volumes" Jan 05 21:54:53 crc kubenswrapper[4910]: I0105 21:54:53.850005 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7tvk2" Jan 05 21:54:54 crc kubenswrapper[4910]: I0105 21:54:54.069992 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wbhgr" Jan 05 21:54:56 crc kubenswrapper[4910]: I0105 21:54:56.597152 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wbhgr"] Jan 05 21:54:56 crc kubenswrapper[4910]: I0105 21:54:56.598393 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wbhgr" podUID="e6181ab2-b292-4e7d-b30e-ec724946700c" containerName="registry-server" containerID="cri-o://e4e27be6d7192d5b7147144ed0072b6b457e1741a44feaa7593e25529a2e61f7" gracePeriod=2 Jan 05 21:54:56 crc kubenswrapper[4910]: I0105 21:54:56.915916 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ghpct" Jan 05 21:54:56 crc kubenswrapper[4910]: I0105 21:54:56.978058 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ghpct" Jan 05 21:54:57 crc kubenswrapper[4910]: I0105 21:54:57.026281 4910 generic.go:334] "Generic (PLEG): container finished" podID="e6181ab2-b292-4e7d-b30e-ec724946700c" containerID="e4e27be6d7192d5b7147144ed0072b6b457e1741a44feaa7593e25529a2e61f7" exitCode=0 Jan 05 21:54:57 crc kubenswrapper[4910]: I0105 21:54:57.026338 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-wbhgr" event={"ID":"e6181ab2-b292-4e7d-b30e-ec724946700c","Type":"ContainerDied","Data":"e4e27be6d7192d5b7147144ed0072b6b457e1741a44feaa7593e25529a2e61f7"} Jan 05 21:54:57 crc kubenswrapper[4910]: I0105 21:54:57.062903 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wbhgr" Jan 05 21:54:57 crc kubenswrapper[4910]: I0105 21:54:57.219843 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6181ab2-b292-4e7d-b30e-ec724946700c-utilities\") pod \"e6181ab2-b292-4e7d-b30e-ec724946700c\" (UID: \"e6181ab2-b292-4e7d-b30e-ec724946700c\") " Jan 05 21:54:57 crc kubenswrapper[4910]: I0105 21:54:57.220002 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ccnl\" (UniqueName: \"kubernetes.io/projected/e6181ab2-b292-4e7d-b30e-ec724946700c-kube-api-access-8ccnl\") pod \"e6181ab2-b292-4e7d-b30e-ec724946700c\" (UID: \"e6181ab2-b292-4e7d-b30e-ec724946700c\") " Jan 05 21:54:57 crc kubenswrapper[4910]: I0105 21:54:57.220073 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6181ab2-b292-4e7d-b30e-ec724946700c-catalog-content\") pod \"e6181ab2-b292-4e7d-b30e-ec724946700c\" (UID: \"e6181ab2-b292-4e7d-b30e-ec724946700c\") " Jan 05 21:54:57 crc kubenswrapper[4910]: I0105 21:54:57.221007 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6181ab2-b292-4e7d-b30e-ec724946700c-utilities" (OuterVolumeSpecName: "utilities") pod "e6181ab2-b292-4e7d-b30e-ec724946700c" (UID: "e6181ab2-b292-4e7d-b30e-ec724946700c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:54:57 crc kubenswrapper[4910]: I0105 21:54:57.233256 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6181ab2-b292-4e7d-b30e-ec724946700c-kube-api-access-8ccnl" (OuterVolumeSpecName: "kube-api-access-8ccnl") pod "e6181ab2-b292-4e7d-b30e-ec724946700c" (UID: "e6181ab2-b292-4e7d-b30e-ec724946700c"). InnerVolumeSpecName "kube-api-access-8ccnl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:54:57 crc kubenswrapper[4910]: I0105 21:54:57.274962 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6181ab2-b292-4e7d-b30e-ec724946700c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e6181ab2-b292-4e7d-b30e-ec724946700c" (UID: "e6181ab2-b292-4e7d-b30e-ec724946700c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:54:57 crc kubenswrapper[4910]: I0105 21:54:57.321813 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6181ab2-b292-4e7d-b30e-ec724946700c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:57 crc kubenswrapper[4910]: I0105 21:54:57.321861 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6181ab2-b292-4e7d-b30e-ec724946700c-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:57 crc kubenswrapper[4910]: I0105 21:54:57.321872 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ccnl\" (UniqueName: \"kubernetes.io/projected/e6181ab2-b292-4e7d-b30e-ec724946700c-kube-api-access-8ccnl\") on node \"crc\" DevicePath \"\"" Jan 05 21:54:58 crc kubenswrapper[4910]: I0105 21:54:58.035969 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wbhgr" event={"ID":"e6181ab2-b292-4e7d-b30e-ec724946700c","Type":"ContainerDied","Data":"3233301218f1b7f1b73dda7a82aada9098161b42124c95190d6f9d50f5a0cb52"} Jan 05 21:54:58 crc kubenswrapper[4910]: I0105 21:54:58.036038 4910 scope.go:117] "RemoveContainer" containerID="e4e27be6d7192d5b7147144ed0072b6b457e1741a44feaa7593e25529a2e61f7" Jan 05 21:54:58 crc kubenswrapper[4910]: I0105 21:54:58.036037 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wbhgr" Jan 05 21:54:58 crc kubenswrapper[4910]: I0105 21:54:58.057389 4910 scope.go:117] "RemoveContainer" containerID="7cc50a10a89158df13e3fc348e47890eca16367902d61414668e20c9165af525" Jan 05 21:54:58 crc kubenswrapper[4910]: I0105 21:54:58.078803 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wbhgr"] Jan 05 21:54:58 crc kubenswrapper[4910]: I0105 21:54:58.079022 4910 scope.go:117] "RemoveContainer" containerID="1e1e03a4588bd995d0066b850743aab6118d35030c133dfc31ade0f9a78e2c5b" Jan 05 21:54:58 crc kubenswrapper[4910]: I0105 21:54:58.082764 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wbhgr"] Jan 05 21:54:58 crc kubenswrapper[4910]: I0105 21:54:58.729096 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6181ab2-b292-4e7d-b30e-ec724946700c" path="/var/lib/kubelet/pods/e6181ab2-b292-4e7d-b30e-ec724946700c/volumes" Jan 05 21:54:59 crc kubenswrapper[4910]: I0105 21:54:59.531091 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-c9bcd9746-bwbfh"] Jan 05 21:54:59 crc kubenswrapper[4910]: I0105 21:54:59.531535 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" podUID="1b7102a7-347a-4025-bc40-6c78fddf35af" containerName="controller-manager" containerID="cri-o://d55aca133b5d9f970e190a25805974ecd2324a431f45d886466f2f62cd595f18" gracePeriod=30 Jan 05 21:54:59 crc kubenswrapper[4910]: I0105 21:54:59.626211 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm"] Jan 05 21:54:59 crc kubenswrapper[4910]: I0105 21:54:59.626957 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" 
podUID="1f60515c-9ab2-40c3-a430-2ff330f483e6" containerName="route-controller-manager" containerID="cri-o://cc6a4f19363c913fe18c7b2e849509266544bcd242c3feb4478d7afdd0841224" gracePeriod=30 Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.029709 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.044722 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.049180 4910 generic.go:334] "Generic (PLEG): container finished" podID="1f60515c-9ab2-40c3-a430-2ff330f483e6" containerID="cc6a4f19363c913fe18c7b2e849509266544bcd242c3feb4478d7afdd0841224" exitCode=0 Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.049274 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.049291 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" event={"ID":"1f60515c-9ab2-40c3-a430-2ff330f483e6","Type":"ContainerDied","Data":"cc6a4f19363c913fe18c7b2e849509266544bcd242c3feb4478d7afdd0841224"} Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.049349 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm" event={"ID":"1f60515c-9ab2-40c3-a430-2ff330f483e6","Type":"ContainerDied","Data":"3bf94ada5b932c2aa7216de0c02754dd4c4b450bbf11a68e10dc55f0e8f502cc"} Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.049371 4910 scope.go:117] "RemoveContainer" containerID="cc6a4f19363c913fe18c7b2e849509266544bcd242c3feb4478d7afdd0841224" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.051113 4910 generic.go:334] "Generic (PLEG): container finished" podID="1b7102a7-347a-4025-bc40-6c78fddf35af" containerID="d55aca133b5d9f970e190a25805974ecd2324a431f45d886466f2f62cd595f18" exitCode=0 Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.051164 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" event={"ID":"1b7102a7-347a-4025-bc40-6c78fddf35af","Type":"ContainerDied","Data":"d55aca133b5d9f970e190a25805974ecd2324a431f45d886466f2f62cd595f18"} Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.051199 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" event={"ID":"1b7102a7-347a-4025-bc40-6c78fddf35af","Type":"ContainerDied","Data":"b6b8d31f3c477f62f2054029b7c0c4693b8daf6f336a427b33b2becbe73488b5"} Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.051170 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-c9bcd9746-bwbfh" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.069530 4910 scope.go:117] "RemoveContainer" containerID="cc6a4f19363c913fe18c7b2e849509266544bcd242c3feb4478d7afdd0841224" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.070092 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc6a4f19363c913fe18c7b2e849509266544bcd242c3feb4478d7afdd0841224\": container with ID starting with cc6a4f19363c913fe18c7b2e849509266544bcd242c3feb4478d7afdd0841224 not found: ID does not exist" containerID="cc6a4f19363c913fe18c7b2e849509266544bcd242c3feb4478d7afdd0841224" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.070154 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc6a4f19363c913fe18c7b2e849509266544bcd242c3feb4478d7afdd0841224"} err="failed to get container status \"cc6a4f19363c913fe18c7b2e849509266544bcd242c3feb4478d7afdd0841224\": rpc error: code = NotFound desc = could not find container \"cc6a4f19363c913fe18c7b2e849509266544bcd242c3feb4478d7afdd0841224\": container with ID starting with cc6a4f19363c913fe18c7b2e849509266544bcd242c3feb4478d7afdd0841224 not found: ID does not exist" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.070187 4910 scope.go:117] "RemoveContainer" containerID="d55aca133b5d9f970e190a25805974ecd2324a431f45d886466f2f62cd595f18" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.100410 4910 scope.go:117] "RemoveContainer" containerID="d55aca133b5d9f970e190a25805974ecd2324a431f45d886466f2f62cd595f18" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.100938 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d55aca133b5d9f970e190a25805974ecd2324a431f45d886466f2f62cd595f18\": container with ID starting with d55aca133b5d9f970e190a25805974ecd2324a431f45d886466f2f62cd595f18 not found: ID does not exist" containerID="d55aca133b5d9f970e190a25805974ecd2324a431f45d886466f2f62cd595f18" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.100984 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d55aca133b5d9f970e190a25805974ecd2324a431f45d886466f2f62cd595f18"} err="failed to get container status \"d55aca133b5d9f970e190a25805974ecd2324a431f45d886466f2f62cd595f18\": rpc error: code = NotFound desc = could not find container \"d55aca133b5d9f970e190a25805974ecd2324a431f45d886466f2f62cd595f18\": container with ID starting with d55aca133b5d9f970e190a25805974ecd2324a431f45d886466f2f62cd595f18 not found: ID does not exist" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.162941 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rpzs4\" (UniqueName: \"kubernetes.io/projected/1b7102a7-347a-4025-bc40-6c78fddf35af-kube-api-access-rpzs4\") pod \"1b7102a7-347a-4025-bc40-6c78fddf35af\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.163024 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f60515c-9ab2-40c3-a430-2ff330f483e6-client-ca\") pod \"1f60515c-9ab2-40c3-a430-2ff330f483e6\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.163068 4910 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f60515c-9ab2-40c3-a430-2ff330f483e6-config\") pod \"1f60515c-9ab2-40c3-a430-2ff330f483e6\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.163097 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-config\") pod \"1b7102a7-347a-4025-bc40-6c78fddf35af\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.163139 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b7102a7-347a-4025-bc40-6c78fddf35af-serving-cert\") pod \"1b7102a7-347a-4025-bc40-6c78fddf35af\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.163204 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-proxy-ca-bundles\") pod \"1b7102a7-347a-4025-bc40-6c78fddf35af\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.163240 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f60515c-9ab2-40c3-a430-2ff330f483e6-serving-cert\") pod \"1f60515c-9ab2-40c3-a430-2ff330f483e6\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.163305 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7sfts\" (UniqueName: \"kubernetes.io/projected/1f60515c-9ab2-40c3-a430-2ff330f483e6-kube-api-access-7sfts\") pod \"1f60515c-9ab2-40c3-a430-2ff330f483e6\" (UID: \"1f60515c-9ab2-40c3-a430-2ff330f483e6\") " Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.163361 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-client-ca\") pod \"1b7102a7-347a-4025-bc40-6c78fddf35af\" (UID: \"1b7102a7-347a-4025-bc40-6c78fddf35af\") " Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.163940 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "1b7102a7-347a-4025-bc40-6c78fddf35af" (UID: "1b7102a7-347a-4025-bc40-6c78fddf35af"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.164051 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f60515c-9ab2-40c3-a430-2ff330f483e6-config" (OuterVolumeSpecName: "config") pod "1f60515c-9ab2-40c3-a430-2ff330f483e6" (UID: "1f60515c-9ab2-40c3-a430-2ff330f483e6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.164052 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-config" (OuterVolumeSpecName: "config") pod "1b7102a7-347a-4025-bc40-6c78fddf35af" (UID: "1b7102a7-347a-4025-bc40-6c78fddf35af"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.164171 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-client-ca" (OuterVolumeSpecName: "client-ca") pod "1b7102a7-347a-4025-bc40-6c78fddf35af" (UID: "1b7102a7-347a-4025-bc40-6c78fddf35af"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.164328 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f60515c-9ab2-40c3-a430-2ff330f483e6-client-ca" (OuterVolumeSpecName: "client-ca") pod "1f60515c-9ab2-40c3-a430-2ff330f483e6" (UID: "1f60515c-9ab2-40c3-a430-2ff330f483e6"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.167879 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b7102a7-347a-4025-bc40-6c78fddf35af-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1b7102a7-347a-4025-bc40-6c78fddf35af" (UID: "1b7102a7-347a-4025-bc40-6c78fddf35af"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.167911 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f60515c-9ab2-40c3-a430-2ff330f483e6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1f60515c-9ab2-40c3-a430-2ff330f483e6" (UID: "1f60515c-9ab2-40c3-a430-2ff330f483e6"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.168073 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f60515c-9ab2-40c3-a430-2ff330f483e6-kube-api-access-7sfts" (OuterVolumeSpecName: "kube-api-access-7sfts") pod "1f60515c-9ab2-40c3-a430-2ff330f483e6" (UID: "1f60515c-9ab2-40c3-a430-2ff330f483e6"). InnerVolumeSpecName "kube-api-access-7sfts". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.168496 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b7102a7-347a-4025-bc40-6c78fddf35af-kube-api-access-rpzs4" (OuterVolumeSpecName: "kube-api-access-rpzs4") pod "1b7102a7-347a-4025-bc40-6c78fddf35af" (UID: "1b7102a7-347a-4025-bc40-6c78fddf35af"). InnerVolumeSpecName "kube-api-access-rpzs4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.264691 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7sfts\" (UniqueName: \"kubernetes.io/projected/1f60515c-9ab2-40c3-a430-2ff330f483e6-kube-api-access-7sfts\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.264734 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-client-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.264747 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rpzs4\" (UniqueName: \"kubernetes.io/projected/1b7102a7-347a-4025-bc40-6c78fddf35af-kube-api-access-rpzs4\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.264756 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f60515c-9ab2-40c3-a430-2ff330f483e6-client-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.264765 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f60515c-9ab2-40c3-a430-2ff330f483e6-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.264774 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.264782 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b7102a7-347a-4025-bc40-6c78fddf35af-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.264790 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1b7102a7-347a-4025-bc40-6c78fddf35af-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.264798 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f60515c-9ab2-40c3-a430-2ff330f483e6-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.386385 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm"] Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.388604 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54c78b4894-sdlgm"] Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.438281 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-c9bcd9746-bwbfh"] Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.445204 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-c9bcd9746-bwbfh"] Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.731698 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b7102a7-347a-4025-bc40-6c78fddf35af" path="/var/lib/kubelet/pods/1b7102a7-347a-4025-bc40-6c78fddf35af/volumes" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.732794 4910 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f60515c-9ab2-40c3-a430-2ff330f483e6" path="/var/lib/kubelet/pods/1f60515c-9ab2-40c3-a430-2ff330f483e6/volumes" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.800150 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" podUID="aa805313-499f-47e9-8ffa-827fb2664a71" containerName="oauth-openshift" containerID="cri-o://5ecee57b82781d0b4c3e3c55bba5a8bbc0addee4eda8d09dfeff805ba544f468" gracePeriod=15 Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.942778 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-87487f679-vbkmz"] Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943145 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b7102a7-347a-4025-bc40-6c78fddf35af" containerName="controller-manager" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943169 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b7102a7-347a-4025-bc40-6c78fddf35af" containerName="controller-manager" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943187 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aef8fca5-e47a-4942-8f59-42731aa77419" containerName="registry-server" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943195 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="aef8fca5-e47a-4942-8f59-42731aa77419" containerName="registry-server" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943215 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" containerName="extract-content" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943221 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" containerName="extract-content" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943235 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="554221c9-a077-40a9-a756-a9589d845ef7" containerName="extract-content" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943245 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="554221c9-a077-40a9-a756-a9589d845ef7" containerName="extract-content" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943256 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6181ab2-b292-4e7d-b30e-ec724946700c" containerName="extract-content" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943262 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6181ab2-b292-4e7d-b30e-ec724946700c" containerName="extract-content" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943274 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f60515c-9ab2-40c3-a430-2ff330f483e6" containerName="route-controller-manager" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943280 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f60515c-9ab2-40c3-a430-2ff330f483e6" containerName="route-controller-manager" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943288 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6181ab2-b292-4e7d-b30e-ec724946700c" containerName="extract-utilities" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943296 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6181ab2-b292-4e7d-b30e-ec724946700c" 
containerName="extract-utilities" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943307 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="554221c9-a077-40a9-a756-a9589d845ef7" containerName="extract-utilities" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943315 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="554221c9-a077-40a9-a756-a9589d845ef7" containerName="extract-utilities" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943326 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aef8fca5-e47a-4942-8f59-42731aa77419" containerName="extract-content" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943333 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="aef8fca5-e47a-4942-8f59-42731aa77419" containerName="extract-content" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943347 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" containerName="registry-server" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943353 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" containerName="registry-server" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943361 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6181ab2-b292-4e7d-b30e-ec724946700c" containerName="registry-server" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943367 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6181ab2-b292-4e7d-b30e-ec724946700c" containerName="registry-server" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943377 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" containerName="extract-utilities" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943383 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" containerName="extract-utilities" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943393 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aef8fca5-e47a-4942-8f59-42731aa77419" containerName="extract-utilities" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943399 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="aef8fca5-e47a-4942-8f59-42731aa77419" containerName="extract-utilities" Jan 05 21:55:00 crc kubenswrapper[4910]: E0105 21:55:00.943411 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="554221c9-a077-40a9-a756-a9589d845ef7" containerName="registry-server" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943417 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="554221c9-a077-40a9-a756-a9589d845ef7" containerName="registry-server" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943524 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b7102a7-347a-4025-bc40-6c78fddf35af" containerName="controller-manager" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943536 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6181ab2-b292-4e7d-b30e-ec724946700c" containerName="registry-server" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943544 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="554221c9-a077-40a9-a756-a9589d845ef7" containerName="registry-server" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943551 4910 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="1f60515c-9ab2-40c3-a430-2ff330f483e6" containerName="route-controller-manager" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943565 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="aef8fca5-e47a-4942-8f59-42731aa77419" containerName="registry-server" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.943574 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="93cdd0f0-6faf-4d13-b090-21afa1ae8f76" containerName="registry-server" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.944135 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.947623 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.947993 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.948155 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.948398 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.959038 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.960307 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz"] Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.962143 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.965272 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.966135 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.966271 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.966436 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.966535 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.969385 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.996398 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/72caeca9-12a4-49ea-bacf-9aaa07f625e5-client-ca\") pod \"route-controller-manager-7f475d6877-ww2dz\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.996528 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-config\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.996561 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72caeca9-12a4-49ea-bacf-9aaa07f625e5-serving-cert\") pod \"route-controller-manager-7f475d6877-ww2dz\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.996573 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.996604 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8730029b-50dc-4410-bfa7-c76b57ac41be-serving-cert\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.996849 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78mw6\" (UniqueName: \"kubernetes.io/projected/8730029b-50dc-4410-bfa7-c76b57ac41be-kube-api-access-78mw6\") pod \"controller-manager-87487f679-vbkmz\" 
(UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.996919 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-proxy-ca-bundles\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.996951 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-client-ca\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.997014 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72caeca9-12a4-49ea-bacf-9aaa07f625e5-config\") pod \"route-controller-manager-7f475d6877-ww2dz\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.997058 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdk42\" (UniqueName: \"kubernetes.io/projected/72caeca9-12a4-49ea-bacf-9aaa07f625e5-kube-api-access-tdk42\") pod \"route-controller-manager-7f475d6877-ww2dz\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:00 crc kubenswrapper[4910]: I0105 21:55:00.997092 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.001153 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz"] Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.003662 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-87487f679-vbkmz"] Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.076179 4910 generic.go:334] "Generic (PLEG): container finished" podID="aa805313-499f-47e9-8ffa-827fb2664a71" containerID="5ecee57b82781d0b4c3e3c55bba5a8bbc0addee4eda8d09dfeff805ba544f468" exitCode=0 Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.076266 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" event={"ID":"aa805313-499f-47e9-8ffa-827fb2664a71","Type":"ContainerDied","Data":"5ecee57b82781d0b4c3e3c55bba5a8bbc0addee4eda8d09dfeff805ba544f468"} Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.101077 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-config\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.101138 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72caeca9-12a4-49ea-bacf-9aaa07f625e5-serving-cert\") pod \"route-controller-manager-7f475d6877-ww2dz\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.101165 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8730029b-50dc-4410-bfa7-c76b57ac41be-serving-cert\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.101216 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78mw6\" (UniqueName: \"kubernetes.io/projected/8730029b-50dc-4410-bfa7-c76b57ac41be-kube-api-access-78mw6\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.101241 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-client-ca\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.101255 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-proxy-ca-bundles\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.101286 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72caeca9-12a4-49ea-bacf-9aaa07f625e5-config\") pod \"route-controller-manager-7f475d6877-ww2dz\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.101303 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdk42\" (UniqueName: \"kubernetes.io/projected/72caeca9-12a4-49ea-bacf-9aaa07f625e5-kube-api-access-tdk42\") pod \"route-controller-manager-7f475d6877-ww2dz\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.101323 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/72caeca9-12a4-49ea-bacf-9aaa07f625e5-client-ca\") pod \"route-controller-manager-7f475d6877-ww2dz\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.102240 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/72caeca9-12a4-49ea-bacf-9aaa07f625e5-client-ca\") pod \"route-controller-manager-7f475d6877-ww2dz\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.103648 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-config\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.103757 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-client-ca\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.104363 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-proxy-ca-bundles\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.105094 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72caeca9-12a4-49ea-bacf-9aaa07f625e5-config\") pod \"route-controller-manager-7f475d6877-ww2dz\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.120849 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8730029b-50dc-4410-bfa7-c76b57ac41be-serving-cert\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.124735 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72caeca9-12a4-49ea-bacf-9aaa07f625e5-serving-cert\") pod \"route-controller-manager-7f475d6877-ww2dz\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.133749 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdk42\" (UniqueName: \"kubernetes.io/projected/72caeca9-12a4-49ea-bacf-9aaa07f625e5-kube-api-access-tdk42\") pod \"route-controller-manager-7f475d6877-ww2dz\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.138954 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78mw6\" (UniqueName: \"kubernetes.io/projected/8730029b-50dc-4410-bfa7-c76b57ac41be-kube-api-access-78mw6\") pod \"controller-manager-87487f679-vbkmz\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " 
pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.163147 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.280287 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.303778 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-login\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.303835 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-service-ca\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.303870 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-provider-selection\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.303900 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-error\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.303942 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-cliconfig\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.303961 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-ocp-branding-template\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.303991 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rs6qv\" (UniqueName: \"kubernetes.io/projected/aa805313-499f-47e9-8ffa-827fb2664a71-kube-api-access-rs6qv\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.304017 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-session\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: 
\"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.304052 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-serving-cert\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.304074 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-trusted-ca-bundle\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.304132 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-router-certs\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.304161 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-audit-policies\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.304189 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-idp-0-file-data\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.304239 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/aa805313-499f-47e9-8ffa-827fb2664a71-audit-dir\") pod \"aa805313-499f-47e9-8ffa-827fb2664a71\" (UID: \"aa805313-499f-47e9-8ffa-827fb2664a71\") " Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.304539 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aa805313-499f-47e9-8ffa-827fb2664a71-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.305046 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.306691 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.308827 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.310324 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.310846 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.312662 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa805313-499f-47e9-8ffa-827fb2664a71-kube-api-access-rs6qv" (OuterVolumeSpecName: "kube-api-access-rs6qv") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). InnerVolumeSpecName "kube-api-access-rs6qv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.312932 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.313424 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.314040 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). 
InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.314327 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.316327 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.317239 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.318349 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.319439 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "aa805313-499f-47e9-8ffa-827fb2664a71" (UID: "aa805313-499f-47e9-8ffa-827fb2664a71"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408205 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408230 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408244 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rs6qv\" (UniqueName: \"kubernetes.io/projected/aa805313-499f-47e9-8ffa-827fb2664a71-kube-api-access-rs6qv\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408253 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408262 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408271 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408281 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408292 4910 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408306 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408316 4910 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/aa805313-499f-47e9-8ffa-827fb2664a71-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408324 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408333 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-system-service-ca\") on node \"crc\" 
DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408343 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.408354 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/aa805313-499f-47e9-8ffa-827fb2664a71-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.580690 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-87487f679-vbkmz"] Jan 05 21:55:01 crc kubenswrapper[4910]: W0105 21:55:01.586164 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8730029b_50dc_4410_bfa7_c76b57ac41be.slice/crio-a0bd6cf942bb0f5f6ae3df2083af77ac0266e75ef22b3a426e4a1863b60f8dbd WatchSource:0}: Error finding container a0bd6cf942bb0f5f6ae3df2083af77ac0266e75ef22b3a426e4a1863b60f8dbd: Status 404 returned error can't find the container with id a0bd6cf942bb0f5f6ae3df2083af77ac0266e75ef22b3a426e4a1863b60f8dbd Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.734996 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz"] Jan 05 21:55:01 crc kubenswrapper[4910]: W0105 21:55:01.740168 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72caeca9_12a4_49ea_bacf_9aaa07f625e5.slice/crio-aa60563c72ca22000ba77f2d043450cb1c90808ec0b46609bdba168071c8826e WatchSource:0}: Error finding container aa60563c72ca22000ba77f2d043450cb1c90808ec0b46609bdba168071c8826e: Status 404 returned error can't find the container with id aa60563c72ca22000ba77f2d043450cb1c90808ec0b46609bdba168071c8826e Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.939054 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-f79475d48-7zgsg"] Jan 05 21:55:01 crc kubenswrapper[4910]: E0105 21:55:01.939694 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa805313-499f-47e9-8ffa-827fb2664a71" containerName="oauth-openshift" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.939710 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa805313-499f-47e9-8ffa-827fb2664a71" containerName="oauth-openshift" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.940025 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa805313-499f-47e9-8ffa-827fb2664a71" containerName="oauth-openshift" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.940416 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:01 crc kubenswrapper[4910]: I0105 21:55:01.986484 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-f79475d48-7zgsg"] Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.087012 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" event={"ID":"8730029b-50dc-4410-bfa7-c76b57ac41be","Type":"ContainerStarted","Data":"6ca020f6f3db27964bad9ad04d394685c2d585d97a3b456341ac79ca61388798"} Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.087064 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" event={"ID":"8730029b-50dc-4410-bfa7-c76b57ac41be","Type":"ContainerStarted","Data":"a0bd6cf942bb0f5f6ae3df2083af77ac0266e75ef22b3a426e4a1863b60f8dbd"} Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.087145 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.089590 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" event={"ID":"72caeca9-12a4-49ea-bacf-9aaa07f625e5","Type":"ContainerStarted","Data":"3a8286f6e1c7db354f72e1df7e2160f4986b3cd70177a35567ed677e4ce75423"} Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.089636 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" event={"ID":"72caeca9-12a4-49ea-bacf-9aaa07f625e5","Type":"ContainerStarted","Data":"aa60563c72ca22000ba77f2d043450cb1c90808ec0b46609bdba168071c8826e"} Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.089797 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.092305 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" event={"ID":"aa805313-499f-47e9-8ffa-827fb2664a71","Type":"ContainerDied","Data":"11f3001fe15eacfac46a955935960d39350713a51a8a037908c80711de6c0e5f"} Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.092323 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gqzj7" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.092385 4910 scope.go:117] "RemoveContainer" containerID="5ecee57b82781d0b4c3e3c55bba5a8bbc0addee4eda8d09dfeff805ba544f468" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.092737 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.110577 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" podStartSLOduration=3.110556382 podStartE2EDuration="3.110556382s" podCreationTimestamp="2026-01-05 21:54:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:55:02.109460141 +0000 UTC m=+233.686957811" watchObservedRunningTime="2026-01-05 21:55:02.110556382 +0000 UTC m=+233.688054052" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119229 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-router-certs\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119302 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-service-ca\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119389 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119557 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-user-template-login\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119582 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gx8hv\" (UniqueName: \"kubernetes.io/projected/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-kube-api-access-gx8hv\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119616 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" 
(UniqueName: \"kubernetes.io/configmap/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119679 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-audit-dir\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119739 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-audit-policies\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119765 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119786 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-user-template-error\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119823 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119841 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119862 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.119886 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-session\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.130282 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" podStartSLOduration=3.130266824 podStartE2EDuration="3.130266824s" podCreationTimestamp="2026-01-05 21:54:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:55:02.128475591 +0000 UTC m=+233.705973261" watchObservedRunningTime="2026-01-05 21:55:02.130266824 +0000 UTC m=+233.707764494" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.161627 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gqzj7"] Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.167922 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gqzj7"] Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.220913 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.221548 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-session\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.221615 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-router-certs\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.222336 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-service-ca\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.222882 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-service-ca\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.222356 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.223000 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-user-template-login\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.223019 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gx8hv\" (UniqueName: \"kubernetes.io/projected/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-kube-api-access-gx8hv\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.223040 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.223078 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-audit-dir\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.223138 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-audit-policies\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.223155 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.223172 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-user-template-error\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.223198 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.223209 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.223214 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.223313 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.226976 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-audit-policies\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.227035 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-audit-dir\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.228245 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.229859 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.233459 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-serving-cert\") pod 
\"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.233556 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-router-certs\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.235909 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-user-template-error\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.236035 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.236349 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-system-session\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.236488 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.236663 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-v4-0-config-user-template-login\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.251651 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gx8hv\" (UniqueName: \"kubernetes.io/projected/d05dbdb1-d1ff-44d1-80b0-359e543a36b2-kube-api-access-gx8hv\") pod \"oauth-openshift-f79475d48-7zgsg\" (UID: \"d05dbdb1-d1ff-44d1-80b0-359e543a36b2\") " pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.258603 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.672174 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-f79475d48-7zgsg"] Jan 05 21:55:02 crc kubenswrapper[4910]: I0105 21:55:02.727940 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa805313-499f-47e9-8ffa-827fb2664a71" path="/var/lib/kubelet/pods/aa805313-499f-47e9-8ffa-827fb2664a71/volumes" Jan 05 21:55:03 crc kubenswrapper[4910]: I0105 21:55:03.100973 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" event={"ID":"d05dbdb1-d1ff-44d1-80b0-359e543a36b2","Type":"ContainerStarted","Data":"37645907ceea960b2c329672a2e46999c32a15eaa10acc77bf791f9c6546d7c9"} Jan 05 21:55:03 crc kubenswrapper[4910]: I0105 21:55:03.102649 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" event={"ID":"d05dbdb1-d1ff-44d1-80b0-359e543a36b2","Type":"ContainerStarted","Data":"2f91f751e40671ab244cb05f8721d92ce1166adeed5f553fa3866201a24c0172"} Jan 05 21:55:03 crc kubenswrapper[4910]: I0105 21:55:03.103031 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:03 crc kubenswrapper[4910]: I0105 21:55:03.104252 4910 patch_prober.go:28] interesting pod/oauth-openshift-f79475d48-7zgsg container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.63:6443/healthz\": dial tcp 10.217.0.63:6443: connect: connection refused" start-of-body= Jan 05 21:55:03 crc kubenswrapper[4910]: I0105 21:55:03.104303 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" podUID="d05dbdb1-d1ff-44d1-80b0-359e543a36b2" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.63:6443/healthz\": dial tcp 10.217.0.63:6443: connect: connection refused" Jan 05 21:55:03 crc kubenswrapper[4910]: I0105 21:55:03.121093 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" podStartSLOduration=28.121070086 podStartE2EDuration="28.121070086s" podCreationTimestamp="2026-01-05 21:54:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:55:03.11926317 +0000 UTC m=+234.696760860" watchObservedRunningTime="2026-01-05 21:55:03.121070086 +0000 UTC m=+234.698567756" Jan 05 21:55:04 crc kubenswrapper[4910]: I0105 21:55:04.108230 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-f79475d48-7zgsg" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.401738 4910 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.402868 4910 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.403220 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" 
containerID="cri-o://ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824" gracePeriod=15 Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.403380 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.403780 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199" gracePeriod=15 Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.403836 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572" gracePeriod=15 Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.403878 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e" gracePeriod=15 Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.403918 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045" gracePeriod=15 Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.405392 4910 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 05 21:55:11 crc kubenswrapper[4910]: E0105 21:55:11.405597 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.405617 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 05 21:55:11 crc kubenswrapper[4910]: E0105 21:55:11.405639 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.405649 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 05 21:55:11 crc kubenswrapper[4910]: E0105 21:55:11.405660 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.405670 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 05 21:55:11 crc kubenswrapper[4910]: E0105 21:55:11.405687 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.405697 4910 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 05 21:55:11 crc kubenswrapper[4910]: E0105 21:55:11.405717 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.405727 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 05 21:55:11 crc kubenswrapper[4910]: E0105 21:55:11.405738 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.405748 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 05 21:55:11 crc kubenswrapper[4910]: E0105 21:55:11.405763 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.405773 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.405964 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.405981 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.405992 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.406002 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.406016 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.406028 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.445474 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.459989 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.460045 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.460090 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.460216 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.460256 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.460300 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.460355 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.460404 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: E0105 21:55:11.547382 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e.scope\": RecentStats: unable to find data in memory cache]" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561504 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561551 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561590 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561623 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561650 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561678 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561713 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561719 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561710 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561747 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561774 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561736 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561795 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561801 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.561795 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.562083 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: I0105 21:55:11.742435 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:55:11 crc kubenswrapper[4910]: E0105 21:55:11.773093 4910 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.166:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.1887f46946d16759 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-05 21:55:11.763081049 +0000 UTC m=+243.340578719,LastTimestamp:2026-01-05 21:55:11.763081049 +0000 UTC m=+243.340578719,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 05 21:55:12 crc kubenswrapper[4910]: I0105 21:55:12.149676 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"98c820565e6cda88f268a07a35f60b24eaa6af65d99da902f590dd6f3d9b9380"} Jan 05 21:55:12 crc kubenswrapper[4910]: I0105 21:55:12.150409 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.166:6443: connect: connection refused" Jan 05 21:55:12 crc kubenswrapper[4910]: I0105 21:55:12.151469 4910 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.166:6443: connect: connection refused" Jan 05 21:55:12 crc kubenswrapper[4910]: I0105 21:55:12.151654 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"bad99426befca9474e65d1f46c4c981976dbda69c64761ea87567ad63d06f9f7"} Jan 05 21:55:12 crc kubenswrapper[4910]: I0105 21:55:12.152280 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 05 21:55:12 crc kubenswrapper[4910]: I0105 21:55:12.153855 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 05 21:55:12 crc kubenswrapper[4910]: I0105 21:55:12.154524 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199" exitCode=0 Jan 05 21:55:12 crc kubenswrapper[4910]: I0105 
21:55:12.154644 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572" exitCode=0 Jan 05 21:55:12 crc kubenswrapper[4910]: I0105 21:55:12.154709 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e" exitCode=0 Jan 05 21:55:12 crc kubenswrapper[4910]: I0105 21:55:12.154779 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045" exitCode=2 Jan 05 21:55:12 crc kubenswrapper[4910]: I0105 21:55:12.154889 4910 scope.go:117] "RemoveContainer" containerID="9c9a26ddac86d27efcf0dcd6a00cd4baa16609795c28e147b3e02cd5c541ccf6" Jan 05 21:55:13 crc kubenswrapper[4910]: I0105 21:55:13.166728 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 05 21:55:13 crc kubenswrapper[4910]: E0105 21:55:13.451733 4910 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.166:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.1887f46946d16759 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-05 21:55:11.763081049 +0000 UTC m=+243.340578719,LastTimestamp:2026-01-05 21:55:11.763081049 +0000 UTC m=+243.340578719,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 05 21:55:13 crc kubenswrapper[4910]: I0105 21:55:13.889995 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 05 21:55:13 crc kubenswrapper[4910]: I0105 21:55:13.890965 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:55:13 crc kubenswrapper[4910]: I0105 21:55:13.891683 4910 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.166:6443: connect: connection refused" Jan 05 21:55:13 crc kubenswrapper[4910]: I0105 21:55:13.892327 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.166:6443: connect: connection refused" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.008130 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.008193 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.008250 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.008274 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.008344 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.008367 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.008466 4910 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.008478 4910 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.008487 4910 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.178833 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.181535 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824" exitCode=0 Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.181701 4910 scope.go:117] "RemoveContainer" containerID="99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.181719 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.197052 4910 scope.go:117] "RemoveContainer" containerID="f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.201586 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.166:6443: connect: connection refused" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.202442 4910 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.166:6443: connect: connection refused" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.219587 4910 scope.go:117] "RemoveContainer" containerID="138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.239857 4910 scope.go:117] "RemoveContainer" containerID="0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.257487 4910 scope.go:117] "RemoveContainer" containerID="ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.280214 4910 scope.go:117] "RemoveContainer" containerID="15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.297352 4910 scope.go:117] "RemoveContainer" 
containerID="99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199" Jan 05 21:55:14 crc kubenswrapper[4910]: E0105 21:55:14.297993 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\": container with ID starting with 99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199 not found: ID does not exist" containerID="99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.298035 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199"} err="failed to get container status \"99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\": rpc error: code = NotFound desc = could not find container \"99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199\": container with ID starting with 99db0b69e28d944950b1d66c8487076992c5c22b710392c7a5f25eb2c65a6199 not found: ID does not exist" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.298063 4910 scope.go:117] "RemoveContainer" containerID="f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572" Jan 05 21:55:14 crc kubenswrapper[4910]: E0105 21:55:14.298585 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\": container with ID starting with f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572 not found: ID does not exist" containerID="f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.298641 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572"} err="failed to get container status \"f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\": rpc error: code = NotFound desc = could not find container \"f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572\": container with ID starting with f244ffda68eddab87cb18cbd265e0e598bf55fb2dc9ff3dc7e10e0f018b27572 not found: ID does not exist" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.298682 4910 scope.go:117] "RemoveContainer" containerID="138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e" Jan 05 21:55:14 crc kubenswrapper[4910]: E0105 21:55:14.299286 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\": container with ID starting with 138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e not found: ID does not exist" containerID="138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.299341 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e"} err="failed to get container status \"138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\": rpc error: code = NotFound desc = could not find container \"138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e\": container with ID starting with 
138025ea5b7f80cb77eca68e1b4962106dbb9da9579819a5a24f6ac80bcfa60e not found: ID does not exist" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.299382 4910 scope.go:117] "RemoveContainer" containerID="0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045" Jan 05 21:55:14 crc kubenswrapper[4910]: E0105 21:55:14.299753 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\": container with ID starting with 0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045 not found: ID does not exist" containerID="0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.299818 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045"} err="failed to get container status \"0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\": rpc error: code = NotFound desc = could not find container \"0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045\": container with ID starting with 0f990f37acfc25ac2603ff96b3a7137c1e86da3cdbcc67baa8f695a3319c5045 not found: ID does not exist" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.299861 4910 scope.go:117] "RemoveContainer" containerID="ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824" Jan 05 21:55:14 crc kubenswrapper[4910]: E0105 21:55:14.300361 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\": container with ID starting with ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824 not found: ID does not exist" containerID="ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.300394 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824"} err="failed to get container status \"ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\": rpc error: code = NotFound desc = could not find container \"ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824\": container with ID starting with ccea8c0eed79919e10570f364b5257fd42daa6f9bbd5739785cda2e1fd077824 not found: ID does not exist" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.300413 4910 scope.go:117] "RemoveContainer" containerID="15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5" Jan 05 21:55:14 crc kubenswrapper[4910]: E0105 21:55:14.300698 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\": container with ID starting with 15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5 not found: ID does not exist" containerID="15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5" Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.300740 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5"} err="failed to get container status \"15630c05ae73bb51db0e5ca56255ca57082d11774f4b4f70596478e78ba517a5\": rpc 
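Each RemoveContainer above is immediately answered with a gRPC NotFound from the CRI runtime: the containers were already purged, and the deletor logs the error but treats the container as gone. A minimal sketch of that idempotent-delete pattern, illustrative only: removeContainer below is an invented stand-in for the real CRI RemoveContainer RPC.

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // removeContainer stands in for the CRI RemoveContainer RPC; here it
    // always reports NotFound, as the runtime did for the IDs above.
    func removeContainer(id string) error {
    	return status.Errorf(codes.NotFound, "could not find container %q", id)
    }

    // deleteContainer treats NotFound as success so cleanup stays
    // idempotent: a container that no longer exists needs no removal.
    func deleteContainer(id string) error {
    	err := removeContainer(id)
    	if err == nil || status.Code(err) == codes.NotFound {
    		return nil
    	}
    	return fmt.Errorf("failed to remove container %q: %w", id, err)
    }

    func main() {
    	if err := deleteContainer("99db0b69"); err != nil {
    		fmt.Println(err)
    		return
    	}
    	fmt.Println("container absent or removed")
    }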
Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.728497 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes"
Jan 05 21:55:14 crc kubenswrapper[4910]: E0105 21:55:14.855299 4910 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:14 crc kubenswrapper[4910]: E0105 21:55:14.855997 4910 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:14 crc kubenswrapper[4910]: E0105 21:55:14.856510 4910 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:14 crc kubenswrapper[4910]: E0105 21:55:14.856766 4910 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:14 crc kubenswrapper[4910]: E0105 21:55:14.856981 4910 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:14 crc kubenswrapper[4910]: I0105 21:55:14.857011 4910 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Jan 05 21:55:14 crc kubenswrapper[4910]: E0105 21:55:14.857251 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused" interval="200ms"
Jan 05 21:55:15 crc kubenswrapper[4910]: E0105 21:55:15.057939 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused" interval="400ms"
Jan 05 21:55:15 crc kubenswrapper[4910]: E0105 21:55:15.458690 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused" interval="800ms"
Jan 05 21:55:16 crc kubenswrapper[4910]: E0105 21:55:16.259633 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused" interval="1.6s"
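Note the retry interval doubling across these entries (200ms, 400ms, 800ms, 1.6s, then 3.2s and 6.4s further down) while the API server is unreachable. A minimal sketch of that capped exponential backoff, illustrative only: the actual cap and retry loop live in the kubelet's lease controller, not in this invented helper.

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // retryWithBackoff retries fn, doubling the wait after each failure and
    // capping it at max, which yields the 200ms/400ms/800ms/1.6s cadence.
    func retryWithBackoff(fn func() error, initial, max time.Duration, attempts int) error {
    	interval := initial
    	for i := 0; i < attempts; i++ {
    		if err := fn(); err == nil {
    			return nil
    		}
    		fmt.Printf("will retry interval=%v\n", interval)
    		time.Sleep(interval)
    		interval *= 2
    		if interval > max {
    			interval = max
    		}
    	}
    	return errors.New("all attempts failed")
    }

    func main() {
    	ensureLease := func() error { return errors.New("connect: connection refused") }
    	_ = retryWithBackoff(ensureLease, 200*time.Millisecond, 7*time.Second, 5)
    }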
Jan 05 21:55:17 crc kubenswrapper[4910]: I0105 21:55:17.205204 4910 generic.go:334] "Generic (PLEG): container finished" podID="ff686cfa-03a7-4c78-8efc-17407e5e79c0" containerID="835e14a77c1b3239d8a5aed34d5d0e9b630634019d8fd581cd9a47169102c4ac" exitCode=0
Jan 05 21:55:17 crc kubenswrapper[4910]: I0105 21:55:17.205316 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"ff686cfa-03a7-4c78-8efc-17407e5e79c0","Type":"ContainerDied","Data":"835e14a77c1b3239d8a5aed34d5d0e9b630634019d8fd581cd9a47169102c4ac"}
Jan 05 21:55:17 crc kubenswrapper[4910]: I0105 21:55:17.206283 4910 status_manager.go:851] "Failed to get status for pod" podUID="ff686cfa-03a7-4c78-8efc-17407e5e79c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:17 crc kubenswrapper[4910]: I0105 21:55:17.206601 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:17 crc kubenswrapper[4910]: E0105 21:55:17.860714 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused" interval="3.2s"
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.534436 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.535282 4910 status_manager.go:851] "Failed to get status for pod" podUID="ff686cfa-03a7-4c78-8efc-17407e5e79c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.535819 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.577342 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ff686cfa-03a7-4c78-8efc-17407e5e79c0-kube-api-access\") pod \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\" (UID: \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\") "
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.577445 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ff686cfa-03a7-4c78-8efc-17407e5e79c0-var-lock\") pod \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\" (UID: \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\") "
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.577468 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ff686cfa-03a7-4c78-8efc-17407e5e79c0-kubelet-dir\") pod \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\" (UID: \"ff686cfa-03a7-4c78-8efc-17407e5e79c0\") "
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.577562 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ff686cfa-03a7-4c78-8efc-17407e5e79c0-var-lock" (OuterVolumeSpecName: "var-lock") pod "ff686cfa-03a7-4c78-8efc-17407e5e79c0" (UID: "ff686cfa-03a7-4c78-8efc-17407e5e79c0"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.577619 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ff686cfa-03a7-4c78-8efc-17407e5e79c0-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ff686cfa-03a7-4c78-8efc-17407e5e79c0" (UID: "ff686cfa-03a7-4c78-8efc-17407e5e79c0"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.577718 4910 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/ff686cfa-03a7-4c78-8efc-17407e5e79c0-var-lock\") on node \"crc\" DevicePath \"\""
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.577735 4910 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ff686cfa-03a7-4c78-8efc-17407e5e79c0-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.584276 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff686cfa-03a7-4c78-8efc-17407e5e79c0-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ff686cfa-03a7-4c78-8efc-17407e5e79c0" (UID: "ff686cfa-03a7-4c78-8efc-17407e5e79c0"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.678851 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ff686cfa-03a7-4c78-8efc-17407e5e79c0-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.724350 4910 status_manager.go:851] "Failed to get status for pod" podUID="ff686cfa-03a7-4c78-8efc-17407e5e79c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:18 crc kubenswrapper[4910]: I0105 21:55:18.724784 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:19 crc kubenswrapper[4910]: I0105 21:55:19.218155 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"ff686cfa-03a7-4c78-8efc-17407e5e79c0","Type":"ContainerDied","Data":"08afc29a04e5e60e52bcefb1b7ba6414540dbfb0c940c2a225843b215d775751"}
Jan 05 21:55:19 crc kubenswrapper[4910]: I0105 21:55:19.218205 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="08afc29a04e5e60e52bcefb1b7ba6414540dbfb0c940c2a225843b215d775751"
Jan 05 21:55:19 crc kubenswrapper[4910]: I0105 21:55:19.218243 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 05 21:55:19 crc kubenswrapper[4910]: I0105 21:55:19.222767 4910 status_manager.go:851] "Failed to get status for pod" podUID="ff686cfa-03a7-4c78-8efc-17407e5e79c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:19 crc kubenswrapper[4910]: I0105 21:55:19.223224 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:21 crc kubenswrapper[4910]: E0105 21:55:21.061318 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.166:6443: connect: connection refused" interval="6.4s"
Jan 05 21:55:23 crc kubenswrapper[4910]: E0105 21:55:23.454897 4910 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.166:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.1887f46946d16759 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-05 21:55:11.763081049 +0000 UTC m=+243.340578719,LastTimestamp:2026-01-05 21:55:11.763081049 +0000 UTC m=+243.340578719,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 05 21:55:25 crc kubenswrapper[4910]: I0105 21:55:25.721692 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:55:25 crc kubenswrapper[4910]: I0105 21:55:25.724204 4910 status_manager.go:851] "Failed to get status for pod" podUID="ff686cfa-03a7-4c78-8efc-17407e5e79c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:25 crc kubenswrapper[4910]: I0105 21:55:25.725146 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:25 crc kubenswrapper[4910]: I0105 21:55:25.746595 4910 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="969290f7-140e-4c49-a197-cfab07022a17"
Jan 05 21:55:25 crc kubenswrapper[4910]: I0105 21:55:25.746654 4910 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="969290f7-140e-4c49-a197-cfab07022a17"
Jan 05 21:55:25 crc kubenswrapper[4910]: E0105 21:55:25.747373 4910 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.166:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:55:25 crc kubenswrapper[4910]: I0105 21:55:25.748171 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:55:25 crc kubenswrapper[4910]: W0105 21:55:25.764746 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-8328f78cf8dcf52eacda072de385c4a256dd85593dba37633407f1a55f538df5 WatchSource:0}: Error finding container 8328f78cf8dcf52eacda072de385c4a256dd85593dba37633407f1a55f538df5: Status 404 returned error can't find the container with id 8328f78cf8dcf52eacda072de385c4a256dd85593dba37633407f1a55f538df5
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.026580 4910 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Readiness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body=
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.026960 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused"
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.260202 4910 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="93c5011e16a40f2086e46cb0c1db9730314f0f425471c6eb948843374f08cebf" exitCode=0
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.260278 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"93c5011e16a40f2086e46cb0c1db9730314f0f425471c6eb948843374f08cebf"}
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.260347 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"8328f78cf8dcf52eacda072de385c4a256dd85593dba37633407f1a55f538df5"}
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.260678 4910 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="969290f7-140e-4c49-a197-cfab07022a17"
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.260702 4910 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="969290f7-140e-4c49-a197-cfab07022a17"
Jan 05 21:55:26 crc kubenswrapper[4910]: E0105 21:55:26.261250 4910 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.166:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.261322 4910 status_manager.go:851] "Failed to get status for pod" podUID="ff686cfa-03a7-4c78-8efc-17407e5e79c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.262030 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.264207 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.264262 4910 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e" exitCode=1
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.264301 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e"}
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.264780 4910 scope.go:117] "RemoveContainer" containerID="02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e"
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.265037 4910 status_manager.go:851] "Failed to get status for pod" podUID="ff686cfa-03a7-4c78-8efc-17407e5e79c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.265602 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.266060 4910 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.166:6443: connect: connection refused"
Jan 05 21:55:26 crc kubenswrapper[4910]: I0105 21:55:26.465725 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 05 21:55:27 crc kubenswrapper[4910]: I0105 21:55:27.273056 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Jan 05 21:55:27 crc kubenswrapper[4910]: I0105 21:55:27.273434 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"199c08933606db3a7ffebc792f9dded017d835f8bece24f165885d1d85b2e554"}
Jan 05 21:55:27 crc kubenswrapper[4910]: I0105 21:55:27.277401 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"8b58c6752010ba4635764963bfecaf8a2bf61692de073e0f2fb57841d5d7a945"}
Jan 05 21:55:27 crc kubenswrapper[4910]: I0105 21:55:27.277456 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"77ec50283abf4a3c2563eea66001199173fa75f75aa99c1c4dd436f82f9dd185"}
Jan 05 21:55:27 crc kubenswrapper[4910]: I0105 21:55:27.277472 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"cb05122d0901b8c639650f2b3cfaf23c33ae9e90fc36c2614cde9e6ade85f9cf"}
Jan 05 21:55:27 crc kubenswrapper[4910]: I0105 21:55:27.277486 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"27216fd6775b38588866d00b4cc5e40df61ca90fb6c55f255410dc19b804cb88"}
Jan 05 21:55:28 crc kubenswrapper[4910]: I0105 21:55:28.287769 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7f67dc26c8ee3554a0ad6caebe6e745b983083261861b05bfecd42aa8d8c1419"}
Jan 05 21:55:28 crc kubenswrapper[4910]: I0105 21:55:28.288072 4910 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="969290f7-140e-4c49-a197-cfab07022a17"
Jan 05 21:55:28 crc kubenswrapper[4910]: I0105 21:55:28.289060 4910 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="969290f7-140e-4c49-a197-cfab07022a17"
Jan 05 21:55:30 crc kubenswrapper[4910]: I0105 21:55:30.748435 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:55:30 crc kubenswrapper[4910]: I0105 21:55:30.749028 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:55:30 crc kubenswrapper[4910]: I0105 21:55:30.759583 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:55:33 crc kubenswrapper[4910]: I0105 21:55:33.315280 4910 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:55:34 crc kubenswrapper[4910]: I0105 21:55:34.328993 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:55:34 crc kubenswrapper[4910]: I0105 21:55:34.329150 4910 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="969290f7-140e-4c49-a197-cfab07022a17"
Jan 05 21:55:34 crc kubenswrapper[4910]: I0105 21:55:34.329198 4910 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="969290f7-140e-4c49-a197-cfab07022a17"
Jan 05 21:55:34 crc kubenswrapper[4910]: I0105 21:55:34.334400 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 05 21:55:34 crc kubenswrapper[4910]: I0105 21:55:34.338400 4910 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="61aec468-b381-4a9e-b74c-74e6d6472e43"
Jan 05 21:55:35 crc kubenswrapper[4910]: I0105 21:55:35.337107 4910 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="969290f7-140e-4c49-a197-cfab07022a17"
Jan 05 21:55:35 crc kubenswrapper[4910]: I0105 21:55:35.337693 4910 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="969290f7-140e-4c49-a197-cfab07022a17"
Jan 05 21:55:36 crc kubenswrapper[4910]: I0105 21:55:36.026486 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 05 21:55:36 crc kubenswrapper[4910]: I0105 21:55:36.344330 4910 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="969290f7-140e-4c49-a197-cfab07022a17"
Jan 05 21:55:36 crc kubenswrapper[4910]: I0105 21:55:36.344395 4910 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="969290f7-140e-4c49-a197-cfab07022a17"
Jan 05 21:55:36 crc kubenswrapper[4910]: I0105 21:55:36.466236 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 05 21:55:36 crc kubenswrapper[4910]: I0105 21:55:36.466979 4910 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body=
Jan 05 21:55:36 crc kubenswrapper[4910]: I0105 21:55:36.467081 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused"
failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 05 21:55:38 crc kubenswrapper[4910]: I0105 21:55:38.756285 4910 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="61aec468-b381-4a9e-b74c-74e6d6472e43" Jan 05 21:55:42 crc kubenswrapper[4910]: I0105 21:55:42.670180 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 05 21:55:42 crc kubenswrapper[4910]: I0105 21:55:42.695941 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 05 21:55:43 crc kubenswrapper[4910]: I0105 21:55:43.128821 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 05 21:55:43 crc kubenswrapper[4910]: I0105 21:55:43.222872 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 05 21:55:43 crc kubenswrapper[4910]: I0105 21:55:43.245098 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 05 21:55:43 crc kubenswrapper[4910]: I0105 21:55:43.399545 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 05 21:55:43 crc kubenswrapper[4910]: I0105 21:55:43.913180 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 05 21:55:44 crc kubenswrapper[4910]: I0105 21:55:44.025989 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 05 21:55:44 crc kubenswrapper[4910]: I0105 21:55:44.323640 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 05 21:55:44 crc kubenswrapper[4910]: I0105 21:55:44.337198 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 05 21:55:44 crc kubenswrapper[4910]: I0105 21:55:44.512967 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 05 21:55:44 crc kubenswrapper[4910]: I0105 21:55:44.895526 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 05 21:55:44 crc kubenswrapper[4910]: I0105 21:55:44.930896 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 05 21:55:45 crc kubenswrapper[4910]: I0105 21:55:45.049759 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 05 21:55:45 crc kubenswrapper[4910]: I0105 21:55:45.106752 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 05 21:55:45 crc kubenswrapper[4910]: I0105 21:55:45.145479 4910 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 05 21:55:45 crc kubenswrapper[4910]: I0105 21:55:45.329323 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 05 21:55:45 crc kubenswrapper[4910]: I0105 21:55:45.340245 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 05 21:55:45 crc kubenswrapper[4910]: I0105 21:55:45.419646 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 05 21:55:45 crc kubenswrapper[4910]: I0105 21:55:45.530311 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 05 21:55:45 crc kubenswrapper[4910]: I0105 21:55:45.814455 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 05 21:55:45 crc kubenswrapper[4910]: I0105 21:55:45.939439 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.042095 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.096512 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.168715 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.192840 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.207109 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.264890 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.269246 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.368857 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.439730 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.466841 4910 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.466913 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.477910 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.608067 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.745283 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.777144 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.812400 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.838852 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.842355 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.920820 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 05 21:55:46 crc kubenswrapper[4910]: I0105 21:55:46.942830 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.061583 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.164282 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.234909 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.254935 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.450737 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.484184 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.491112 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.556521 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.575564 4910 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.605228 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.624872 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.749425 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.775828 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.821027 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.830963 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.904336 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.984643 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 05 21:55:47 crc kubenswrapper[4910]: I0105 21:55:47.988397 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.037569 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.058530 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.086994 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.092332 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.182512 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.294878 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.410278 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.451722 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.505232 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.575186 4910 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.578684 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.748906 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.763347 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.788235 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 05 21:55:48 crc kubenswrapper[4910]: I0105 21:55:48.915106 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.032743 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.044983 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.071341 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.113551 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.120611 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.138805 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.156464 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.218710 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.271595 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.335516 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.405443 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.470057 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.494740 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.528412 4910 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-network-operator"/"metrics-tls" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.529660 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.608913 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.623640 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.736627 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.758360 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.801019 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.953778 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 05 21:55:49 crc kubenswrapper[4910]: I0105 21:55:49.989389 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.036784 4910 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.077588 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.109802 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.135241 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.136558 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.213146 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.252259 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.273236 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.324579 4910 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.356116 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.469969 4910 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.585148 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.615024 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.740381 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.816255 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.878807 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.891483 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.952186 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 05 21:55:50 crc kubenswrapper[4910]: I0105 21:55:50.972300 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.007675 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.027760 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.210788 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.252464 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.314772 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.368382 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.426988 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.509054 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.530214 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.539531 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.547493 4910 reflector.go:368] Caches populated for *v1.Secret from 
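Each "Caches populated" line above marks a client-go reflector finishing its initial List+Watch for one ConfigMap or Secret now that the API server is reachable again; consumers normally block on cache sync before reading from an informer. A minimal client-go sketch of that wait, illustrative only: it assumes in-cluster credentials and is not the kubelet's own code.

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"k8s.io/client-go/informers"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    	"k8s.io/client-go/tools/cache"
    )

    func main() {
    	cfg, err := rest.InClusterConfig() // assumes we run inside the cluster
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	client := kubernetes.NewForConfigOrDie(cfg)

    	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
    	cmInformer := factory.Core().V1().ConfigMaps().Informer()

    	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    	defer cancel()
    	factory.Start(ctx.Done())

    	// Block until the reflector's initial List has filled the cache,
    	// the moment the "Caches populated" entries above record.
    	if !cache.WaitForCacheSync(ctx.Done(), cmInformer.HasSynced) {
    		fmt.Println("cache never synced")
    		return
    	}
    	fmt.Println("ConfigMap cache populated")
    }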
object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.572955 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.861566 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.867576 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.886885 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.892690 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.943262 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 05 21:55:51 crc kubenswrapper[4910]: I0105 21:55:51.946511 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.087311 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.105538 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.113501 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.122979 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.159018 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.206571 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.217331 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.287598 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.294555 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.369166 4910 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.375373 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=41.375352149 podStartE2EDuration="41.375352149s" podCreationTimestamp="2026-01-05 21:55:11 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:55:33.040822521 +0000 UTC m=+264.618320201" watchObservedRunningTime="2026-01-05 21:55:52.375352149 +0000 UTC m=+283.952849819" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.376438 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.376478 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.380592 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.395578 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=19.395559064 podStartE2EDuration="19.395559064s" podCreationTimestamp="2026-01-05 21:55:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:55:52.392039803 +0000 UTC m=+283.969537473" watchObservedRunningTime="2026-01-05 21:55:52.395559064 +0000 UTC m=+283.973056744" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.397441 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.491904 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.551694 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.555325 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.589981 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.640806 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.678904 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.680867 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.780924 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.781560 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.793555 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.891075 4910 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 05 21:55:52 crc kubenswrapper[4910]: I0105 21:55:52.891385 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.022652 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.101300 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.102091 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.176178 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.180137 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.216454 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.228878 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.228928 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.230320 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.296083 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.327743 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.361584 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.422143 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.505482 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.556163 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.574331 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.583143 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 
21:55:53.754879 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 05 21:55:53 crc kubenswrapper[4910]: I0105 21:55:53.852671 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 05 21:55:54 crc kubenswrapper[4910]: I0105 21:55:54.194478 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 05 21:55:54 crc kubenswrapper[4910]: I0105 21:55:54.231953 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 05 21:55:54 crc kubenswrapper[4910]: I0105 21:55:54.253766 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 05 21:55:54 crc kubenswrapper[4910]: I0105 21:55:54.304446 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 05 21:55:54 crc kubenswrapper[4910]: I0105 21:55:54.496906 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 05 21:55:54 crc kubenswrapper[4910]: I0105 21:55:54.564165 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 05 21:55:54 crc kubenswrapper[4910]: I0105 21:55:54.656137 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 05 21:55:54 crc kubenswrapper[4910]: I0105 21:55:54.816861 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 05 21:55:54 crc kubenswrapper[4910]: I0105 21:55:54.834388 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 05 21:55:54 crc kubenswrapper[4910]: I0105 21:55:54.871725 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 05 21:55:54 crc kubenswrapper[4910]: I0105 21:55:54.956060 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 05 21:55:54 crc kubenswrapper[4910]: I0105 21:55:54.995251 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 05 21:55:54 crc kubenswrapper[4910]: I0105 21:55:54.999553 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.002161 4910 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.074463 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.105528 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.118448 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.183388 4910 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.209757 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.283239 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.380732 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.389412 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.391053 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.410270 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.441000 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.457705 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.526346 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.531661 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.612980 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.651355 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.695889 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.700759 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.726693 4910 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.726978 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://98c820565e6cda88f268a07a35f60b24eaa6af65d99da902f590dd6f3d9b9380" gracePeriod=5 Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.754391 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 05 
21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.799250 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.820160 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.906473 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.912864 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 05 21:55:55 crc kubenswrapper[4910]: I0105 21:55:55.968069 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.045988 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.056328 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.107941 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.467057 4910 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.467250 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.467416 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.468592 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"199c08933606db3a7ffebc792f9dded017d835f8bece24f165885d1d85b2e554"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.468795 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://199c08933606db3a7ffebc792f9dded017d835f8bece24f165885d1d85b2e554" gracePeriod=30 Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.600586 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.602815 
4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.644366 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.727625 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.846005 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.862366 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.947635 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.959197 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 05 21:55:56 crc kubenswrapper[4910]: I0105 21:55:56.982984 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.047286 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.100859 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.256797 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.259476 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.284416 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.321573 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.388083 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.407649 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.449082 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.482935 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.485281 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.500546 4910 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.560297 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.623715 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.641912 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.781783 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 05 21:55:57 crc kubenswrapper[4910]: I0105 21:55:57.929459 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 05 21:55:58 crc kubenswrapper[4910]: I0105 21:55:58.061758 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 05 21:55:58 crc kubenswrapper[4910]: I0105 21:55:58.428135 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 05 21:55:58 crc kubenswrapper[4910]: I0105 21:55:58.460623 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 05 21:55:58 crc kubenswrapper[4910]: I0105 21:55:58.500364 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 05 21:55:58 crc kubenswrapper[4910]: I0105 21:55:58.517495 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 05 21:55:58 crc kubenswrapper[4910]: I0105 21:55:58.537394 4910 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 05 21:55:58 crc kubenswrapper[4910]: I0105 21:55:58.619243 4910 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 05 21:55:58 crc kubenswrapper[4910]: I0105 21:55:58.778610 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 05 21:55:59 crc kubenswrapper[4910]: I0105 21:55:59.040193 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 05 21:56:00 crc kubenswrapper[4910]: I0105 21:56:00.115657 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 05 21:56:00 crc kubenswrapper[4910]: I0105 21:56:00.439565 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2hg8l"] Jan 05 21:56:00 crc kubenswrapper[4910]: I0105 21:56:00.454880 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7tvk2"] Jan 05 21:56:00 crc kubenswrapper[4910]: I0105 21:56:00.455328 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7tvk2" podUID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" containerName="registry-server" 
containerID="cri-o://1de8eacb26cef6b46de2bc9cf5e247e88efa6be2b0bce9cd969f18aa75dd4c17" gracePeriod=30 Jan 05 21:56:00 crc kubenswrapper[4910]: I0105 21:56:00.463808 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-llpdj"] Jan 05 21:56:00 crc kubenswrapper[4910]: I0105 21:56:00.464135 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" podUID="df73d562-aee4-4b56-b241-bd31f5c95714" containerName="marketplace-operator" containerID="cri-o://11a17322adb2c5ff1ea5fe398d7f644f21e7b4480df5304859a2db118fe121f8" gracePeriod=30 Jan 05 21:56:00 crc kubenswrapper[4910]: I0105 21:56:00.472089 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzbmf"] Jan 05 21:56:00 crc kubenswrapper[4910]: I0105 21:56:00.472935 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pzbmf" podUID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" containerName="registry-server" containerID="cri-o://32c0bb0052253f8dd2c344748d2eb39984ef98f99f8d9815e4bcd9f36d764353" gracePeriod=30 Jan 05 21:56:00 crc kubenswrapper[4910]: I0105 21:56:00.475708 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ghpct"] Jan 05 21:56:00 crc kubenswrapper[4910]: I0105 21:56:00.481613 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ghpct" podUID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" containerName="registry-server" containerID="cri-o://29971467a3dcb3c04d6a710368043b3869f40ed8b96ccb86c936ddf566fc632c" gracePeriod=30 Jan 05 21:56:00 crc kubenswrapper[4910]: I0105 21:56:00.523036 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2hg8l" podUID="340fecda-72dc-4870-887a-29b5ef58ae94" containerName="registry-server" containerID="cri-o://bd4e3409bf7d1d3b572fd967c5225bc3dee6e4b1ee3a4eda613979d62a9647ad" gracePeriod=30 Jan 05 21:56:00 crc kubenswrapper[4910]: I0105 21:56:00.979186 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pzbmf" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.044723 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2hg8l" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.049663 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7tvk2" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.062438 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.072284 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ghpct" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.074295 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/340fecda-72dc-4870-887a-29b5ef58ae94-utilities\") pod \"340fecda-72dc-4870-887a-29b5ef58ae94\" (UID: \"340fecda-72dc-4870-887a-29b5ef58ae94\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.074340 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e67293c9-fc75-468d-b1c5-c09f9ad46dda-catalog-content\") pod \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\" (UID: \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.074373 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7xvk\" (UniqueName: \"kubernetes.io/projected/e67293c9-fc75-468d-b1c5-c09f9ad46dda-kube-api-access-z7xvk\") pod \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\" (UID: \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.074405 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/df73d562-aee4-4b56-b241-bd31f5c95714-marketplace-operator-metrics\") pod \"df73d562-aee4-4b56-b241-bd31f5c95714\" (UID: \"df73d562-aee4-4b56-b241-bd31f5c95714\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.074432 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/340fecda-72dc-4870-887a-29b5ef58ae94-catalog-content\") pod \"340fecda-72dc-4870-887a-29b5ef58ae94\" (UID: \"340fecda-72dc-4870-887a-29b5ef58ae94\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.074456 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-utilities\") pod \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\" (UID: \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.074489 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/df73d562-aee4-4b56-b241-bd31f5c95714-marketplace-trusted-ca\") pod \"df73d562-aee4-4b56-b241-bd31f5c95714\" (UID: \"df73d562-aee4-4b56-b241-bd31f5c95714\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.074515 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-catalog-content\") pod \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\" (UID: \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.074567 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e67293c9-fc75-468d-b1c5-c09f9ad46dda-utilities\") pod \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\" (UID: \"e67293c9-fc75-468d-b1c5-c09f9ad46dda\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.074601 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsb2f\" (UniqueName: 
\"kubernetes.io/projected/340fecda-72dc-4870-887a-29b5ef58ae94-kube-api-access-zsb2f\") pod \"340fecda-72dc-4870-887a-29b5ef58ae94\" (UID: \"340fecda-72dc-4870-887a-29b5ef58ae94\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.074626 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dc5zf\" (UniqueName: \"kubernetes.io/projected/df73d562-aee4-4b56-b241-bd31f5c95714-kube-api-access-dc5zf\") pod \"df73d562-aee4-4b56-b241-bd31f5c95714\" (UID: \"df73d562-aee4-4b56-b241-bd31f5c95714\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.074661 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvgsn\" (UniqueName: \"kubernetes.io/projected/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-kube-api-access-mvgsn\") pod \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\" (UID: \"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.075736 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-utilities" (OuterVolumeSpecName: "utilities") pod "e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" (UID: "e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.075795 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df73d562-aee4-4b56-b241-bd31f5c95714-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "df73d562-aee4-4b56-b241-bd31f5c95714" (UID: "df73d562-aee4-4b56-b241-bd31f5c95714"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.075831 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/340fecda-72dc-4870-887a-29b5ef58ae94-utilities" (OuterVolumeSpecName: "utilities") pod "340fecda-72dc-4870-887a-29b5ef58ae94" (UID: "340fecda-72dc-4870-887a-29b5ef58ae94"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.078994 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e67293c9-fc75-468d-b1c5-c09f9ad46dda-utilities" (OuterVolumeSpecName: "utilities") pod "e67293c9-fc75-468d-b1c5-c09f9ad46dda" (UID: "e67293c9-fc75-468d-b1c5-c09f9ad46dda"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.082714 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/340fecda-72dc-4870-887a-29b5ef58ae94-kube-api-access-zsb2f" (OuterVolumeSpecName: "kube-api-access-zsb2f") pod "340fecda-72dc-4870-887a-29b5ef58ae94" (UID: "340fecda-72dc-4870-887a-29b5ef58ae94"). InnerVolumeSpecName "kube-api-access-zsb2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.083689 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df73d562-aee4-4b56-b241-bd31f5c95714-kube-api-access-dc5zf" (OuterVolumeSpecName: "kube-api-access-dc5zf") pod "df73d562-aee4-4b56-b241-bd31f5c95714" (UID: "df73d562-aee4-4b56-b241-bd31f5c95714"). InnerVolumeSpecName "kube-api-access-dc5zf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.084595 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-kube-api-access-mvgsn" (OuterVolumeSpecName: "kube-api-access-mvgsn") pod "e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" (UID: "e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e"). InnerVolumeSpecName "kube-api-access-mvgsn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.085322 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df73d562-aee4-4b56-b241-bd31f5c95714-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "df73d562-aee4-4b56-b241-bd31f5c95714" (UID: "df73d562-aee4-4b56-b241-bd31f5c95714"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.091095 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e67293c9-fc75-468d-b1c5-c09f9ad46dda-kube-api-access-z7xvk" (OuterVolumeSpecName: "kube-api-access-z7xvk") pod "e67293c9-fc75-468d-b1c5-c09f9ad46dda" (UID: "e67293c9-fc75-468d-b1c5-c09f9ad46dda"). InnerVolumeSpecName "kube-api-access-z7xvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.099330 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" (UID: "e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.156382 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e67293c9-fc75-468d-b1c5-c09f9ad46dda-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e67293c9-fc75-468d-b1c5-c09f9ad46dda" (UID: "e67293c9-fc75-468d-b1c5-c09f9ad46dda"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.167026 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/340fecda-72dc-4870-887a-29b5ef58ae94-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "340fecda-72dc-4870-887a-29b5ef58ae94" (UID: "340fecda-72dc-4870-887a-29b5ef58ae94"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176038 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-utilities\") pod \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\" (UID: \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176109 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-catalog-content\") pod \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\" (UID: \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176187 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-967cg\" (UniqueName: \"kubernetes.io/projected/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-kube-api-access-967cg\") pod \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\" (UID: \"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176421 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e67293c9-fc75-468d-b1c5-c09f9ad46dda-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176450 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zsb2f\" (UniqueName: \"kubernetes.io/projected/340fecda-72dc-4870-887a-29b5ef58ae94-kube-api-access-zsb2f\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176467 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dc5zf\" (UniqueName: \"kubernetes.io/projected/df73d562-aee4-4b56-b241-bd31f5c95714-kube-api-access-dc5zf\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176481 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvgsn\" (UniqueName: \"kubernetes.io/projected/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-kube-api-access-mvgsn\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176493 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/340fecda-72dc-4870-887a-29b5ef58ae94-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176504 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e67293c9-fc75-468d-b1c5-c09f9ad46dda-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176516 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7xvk\" (UniqueName: \"kubernetes.io/projected/e67293c9-fc75-468d-b1c5-c09f9ad46dda-kube-api-access-z7xvk\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176530 4910 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/df73d562-aee4-4b56-b241-bd31f5c95714-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176542 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/340fecda-72dc-4870-887a-29b5ef58ae94-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176554 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176570 4910 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/df73d562-aee4-4b56-b241-bd31f5c95714-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.176583 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.179956 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-kube-api-access-967cg" (OuterVolumeSpecName: "kube-api-access-967cg") pod "060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" (UID: "060b3be3-5d9d-47dc-a01e-7a79aa9f13b4"). InnerVolumeSpecName "kube-api-access-967cg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.180905 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-utilities" (OuterVolumeSpecName: "utilities") pod "060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" (UID: "060b3be3-5d9d-47dc-a01e-7a79aa9f13b4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.277235 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-967cg\" (UniqueName: \"kubernetes.io/projected/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-kube-api-access-967cg\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.277279 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.294421 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.294521 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.307370 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" (UID: "060b3be3-5d9d-47dc-a01e-7a79aa9f13b4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.378989 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.379142 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.379309 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.379352 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.379405 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.379395 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.379495 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.379539 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.379678 4910 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.379702 4910 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.379712 4910 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.379726 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.379677 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.387753 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.481852 4910 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.481928 4910 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.532606 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.532674 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="98c820565e6cda88f268a07a35f60b24eaa6af65d99da902f590dd6f3d9b9380" exitCode=137 Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.532808 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.533503 4910 scope.go:117] "RemoveContainer" containerID="98c820565e6cda88f268a07a35f60b24eaa6af65d99da902f590dd6f3d9b9380" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.537827 4910 generic.go:334] "Generic (PLEG): container finished" podID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" containerID="32c0bb0052253f8dd2c344748d2eb39984ef98f99f8d9815e4bcd9f36d764353" exitCode=0 Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.537951 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzbmf" event={"ID":"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e","Type":"ContainerDied","Data":"32c0bb0052253f8dd2c344748d2eb39984ef98f99f8d9815e4bcd9f36d764353"} Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.538012 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzbmf" event={"ID":"e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e","Type":"ContainerDied","Data":"fb58bf1adf8ea4e43e1d3966ba8c01bc94ae2842893145ffbf78e2f0d5ee6878"} Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.538162 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pzbmf" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.540917 4910 generic.go:334] "Generic (PLEG): container finished" podID="df73d562-aee4-4b56-b241-bd31f5c95714" containerID="11a17322adb2c5ff1ea5fe398d7f644f21e7b4480df5304859a2db118fe121f8" exitCode=0 Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.541301 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.541317 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" event={"ID":"df73d562-aee4-4b56-b241-bd31f5c95714","Type":"ContainerDied","Data":"11a17322adb2c5ff1ea5fe398d7f644f21e7b4480df5304859a2db118fe121f8"} Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.541762 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-llpdj" event={"ID":"df73d562-aee4-4b56-b241-bd31f5c95714","Type":"ContainerDied","Data":"9df8da22591fa7f0bda034dddf685adb748c74aea7bb7a0625b5cf016904f716"} Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.550393 4910 generic.go:334] "Generic (PLEG): container finished" podID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" containerID="1de8eacb26cef6b46de2bc9cf5e247e88efa6be2b0bce9cd969f18aa75dd4c17" exitCode=0 Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.550447 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7tvk2" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.550447 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tvk2" event={"ID":"e67293c9-fc75-468d-b1c5-c09f9ad46dda","Type":"ContainerDied","Data":"1de8eacb26cef6b46de2bc9cf5e247e88efa6be2b0bce9cd969f18aa75dd4c17"} Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.550529 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tvk2" event={"ID":"e67293c9-fc75-468d-b1c5-c09f9ad46dda","Type":"ContainerDied","Data":"dcc3a3a2b74af73c57e86fa4a0fcd79b9a9b81245dc52e394ea1827c92cc0991"} Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.559021 4910 generic.go:334] "Generic (PLEG): container finished" podID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" containerID="29971467a3dcb3c04d6a710368043b3869f40ed8b96ccb86c936ddf566fc632c" exitCode=0 Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.559157 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ghpct" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.559168 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ghpct" event={"ID":"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4","Type":"ContainerDied","Data":"29971467a3dcb3c04d6a710368043b3869f40ed8b96ccb86c936ddf566fc632c"} Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.559242 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ghpct" event={"ID":"060b3be3-5d9d-47dc-a01e-7a79aa9f13b4","Type":"ContainerDied","Data":"9d0d4a97b86bcf515ce503089ccc693022f42998c56ef744db627fd0576101dd"} Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.562077 4910 scope.go:117] "RemoveContainer" containerID="98c820565e6cda88f268a07a35f60b24eaa6af65d99da902f590dd6f3d9b9380" Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.562721 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98c820565e6cda88f268a07a35f60b24eaa6af65d99da902f590dd6f3d9b9380\": container with ID starting with 98c820565e6cda88f268a07a35f60b24eaa6af65d99da902f590dd6f3d9b9380 not found: ID does not exist" containerID="98c820565e6cda88f268a07a35f60b24eaa6af65d99da902f590dd6f3d9b9380" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.562788 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98c820565e6cda88f268a07a35f60b24eaa6af65d99da902f590dd6f3d9b9380"} err="failed to get container status \"98c820565e6cda88f268a07a35f60b24eaa6af65d99da902f590dd6f3d9b9380\": rpc error: code = NotFound desc = could not find container \"98c820565e6cda88f268a07a35f60b24eaa6af65d99da902f590dd6f3d9b9380\": container with ID starting with 98c820565e6cda88f268a07a35f60b24eaa6af65d99da902f590dd6f3d9b9380 not found: ID does not exist" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.562839 4910 scope.go:117] "RemoveContainer" containerID="32c0bb0052253f8dd2c344748d2eb39984ef98f99f8d9815e4bcd9f36d764353" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.563511 4910 generic.go:334] "Generic (PLEG): container finished" podID="340fecda-72dc-4870-887a-29b5ef58ae94" containerID="bd4e3409bf7d1d3b572fd967c5225bc3dee6e4b1ee3a4eda613979d62a9647ad" exitCode=0 Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.563569 4910 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hg8l" event={"ID":"340fecda-72dc-4870-887a-29b5ef58ae94","Type":"ContainerDied","Data":"bd4e3409bf7d1d3b572fd967c5225bc3dee6e4b1ee3a4eda613979d62a9647ad"} Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.563590 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2hg8l" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.563612 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hg8l" event={"ID":"340fecda-72dc-4870-887a-29b5ef58ae94","Type":"ContainerDied","Data":"081053c12208e240cd041d0dd51d64b96b506715362717423904103744dc15c8"} Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.637520 4910 scope.go:117] "RemoveContainer" containerID="67a9ef8cc27153979357e9a345b762db7cbc41e4351199d0570991f38188d8fa" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.655982 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7tvk2"] Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.665029 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7tvk2"] Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.669312 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-llpdj"] Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.672205 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-llpdj"] Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.685938 4910 scope.go:117] "RemoveContainer" containerID="a309e408734a42d7078f5bc3aec4bbca064fb3f412a82f1c4c92ee3c5f5f06f0" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.689135 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzbmf"] Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.700651 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzbmf"] Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.711382 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ghpct"] Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.713736 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ghpct"] Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.719270 4910 scope.go:117] "RemoveContainer" containerID="32c0bb0052253f8dd2c344748d2eb39984ef98f99f8d9815e4bcd9f36d764353" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.719407 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2hg8l"] Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.720010 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32c0bb0052253f8dd2c344748d2eb39984ef98f99f8d9815e4bcd9f36d764353\": container with ID starting with 32c0bb0052253f8dd2c344748d2eb39984ef98f99f8d9815e4bcd9f36d764353 not found: ID does not exist" containerID="32c0bb0052253f8dd2c344748d2eb39984ef98f99f8d9815e4bcd9f36d764353" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.720056 4910 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"32c0bb0052253f8dd2c344748d2eb39984ef98f99f8d9815e4bcd9f36d764353"} err="failed to get container status \"32c0bb0052253f8dd2c344748d2eb39984ef98f99f8d9815e4bcd9f36d764353\": rpc error: code = NotFound desc = could not find container \"32c0bb0052253f8dd2c344748d2eb39984ef98f99f8d9815e4bcd9f36d764353\": container with ID starting with 32c0bb0052253f8dd2c344748d2eb39984ef98f99f8d9815e4bcd9f36d764353 not found: ID does not exist" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.720098 4910 scope.go:117] "RemoveContainer" containerID="67a9ef8cc27153979357e9a345b762db7cbc41e4351199d0570991f38188d8fa" Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.720834 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67a9ef8cc27153979357e9a345b762db7cbc41e4351199d0570991f38188d8fa\": container with ID starting with 67a9ef8cc27153979357e9a345b762db7cbc41e4351199d0570991f38188d8fa not found: ID does not exist" containerID="67a9ef8cc27153979357e9a345b762db7cbc41e4351199d0570991f38188d8fa" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.721898 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67a9ef8cc27153979357e9a345b762db7cbc41e4351199d0570991f38188d8fa"} err="failed to get container status \"67a9ef8cc27153979357e9a345b762db7cbc41e4351199d0570991f38188d8fa\": rpc error: code = NotFound desc = could not find container \"67a9ef8cc27153979357e9a345b762db7cbc41e4351199d0570991f38188d8fa\": container with ID starting with 67a9ef8cc27153979357e9a345b762db7cbc41e4351199d0570991f38188d8fa not found: ID does not exist" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.721955 4910 scope.go:117] "RemoveContainer" containerID="a309e408734a42d7078f5bc3aec4bbca064fb3f412a82f1c4c92ee3c5f5f06f0" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.722568 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2hg8l"] Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.722606 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a309e408734a42d7078f5bc3aec4bbca064fb3f412a82f1c4c92ee3c5f5f06f0\": container with ID starting with a309e408734a42d7078f5bc3aec4bbca064fb3f412a82f1c4c92ee3c5f5f06f0 not found: ID does not exist" containerID="a309e408734a42d7078f5bc3aec4bbca064fb3f412a82f1c4c92ee3c5f5f06f0" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.722758 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a309e408734a42d7078f5bc3aec4bbca064fb3f412a82f1c4c92ee3c5f5f06f0"} err="failed to get container status \"a309e408734a42d7078f5bc3aec4bbca064fb3f412a82f1c4c92ee3c5f5f06f0\": rpc error: code = NotFound desc = could not find container \"a309e408734a42d7078f5bc3aec4bbca064fb3f412a82f1c4c92ee3c5f5f06f0\": container with ID starting with a309e408734a42d7078f5bc3aec4bbca064fb3f412a82f1c4c92ee3c5f5f06f0 not found: ID does not exist" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.722790 4910 scope.go:117] "RemoveContainer" containerID="11a17322adb2c5ff1ea5fe398d7f644f21e7b4480df5304859a2db118fe121f8" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.740977 4910 scope.go:117] "RemoveContainer" containerID="11a17322adb2c5ff1ea5fe398d7f644f21e7b4480df5304859a2db118fe121f8" Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.742467 4910 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11a17322adb2c5ff1ea5fe398d7f644f21e7b4480df5304859a2db118fe121f8\": container with ID starting with 11a17322adb2c5ff1ea5fe398d7f644f21e7b4480df5304859a2db118fe121f8 not found: ID does not exist" containerID="11a17322adb2c5ff1ea5fe398d7f644f21e7b4480df5304859a2db118fe121f8" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.742510 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11a17322adb2c5ff1ea5fe398d7f644f21e7b4480df5304859a2db118fe121f8"} err="failed to get container status \"11a17322adb2c5ff1ea5fe398d7f644f21e7b4480df5304859a2db118fe121f8\": rpc error: code = NotFound desc = could not find container \"11a17322adb2c5ff1ea5fe398d7f644f21e7b4480df5304859a2db118fe121f8\": container with ID starting with 11a17322adb2c5ff1ea5fe398d7f644f21e7b4480df5304859a2db118fe121f8 not found: ID does not exist" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.742545 4910 scope.go:117] "RemoveContainer" containerID="1de8eacb26cef6b46de2bc9cf5e247e88efa6be2b0bce9cd969f18aa75dd4c17" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.758022 4910 scope.go:117] "RemoveContainer" containerID="236b3a0b6b653cbbb841b3d2bc4a4b5fa288b2e3ed97efa1ce46c334cf175393" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.778215 4910 scope.go:117] "RemoveContainer" containerID="cdbfba9fe0e865807ebc0dbe4bd01a9cb1d24ea84c2043d2cc54f3f70da09b15" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.797091 4910 scope.go:117] "RemoveContainer" containerID="1de8eacb26cef6b46de2bc9cf5e247e88efa6be2b0bce9cd969f18aa75dd4c17" Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.797728 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1de8eacb26cef6b46de2bc9cf5e247e88efa6be2b0bce9cd969f18aa75dd4c17\": container with ID starting with 1de8eacb26cef6b46de2bc9cf5e247e88efa6be2b0bce9cd969f18aa75dd4c17 not found: ID does not exist" containerID="1de8eacb26cef6b46de2bc9cf5e247e88efa6be2b0bce9cd969f18aa75dd4c17" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.797793 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1de8eacb26cef6b46de2bc9cf5e247e88efa6be2b0bce9cd969f18aa75dd4c17"} err="failed to get container status \"1de8eacb26cef6b46de2bc9cf5e247e88efa6be2b0bce9cd969f18aa75dd4c17\": rpc error: code = NotFound desc = could not find container \"1de8eacb26cef6b46de2bc9cf5e247e88efa6be2b0bce9cd969f18aa75dd4c17\": container with ID starting with 1de8eacb26cef6b46de2bc9cf5e247e88efa6be2b0bce9cd969f18aa75dd4c17 not found: ID does not exist" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.797842 4910 scope.go:117] "RemoveContainer" containerID="236b3a0b6b653cbbb841b3d2bc4a4b5fa288b2e3ed97efa1ce46c334cf175393" Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.798376 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"236b3a0b6b653cbbb841b3d2bc4a4b5fa288b2e3ed97efa1ce46c334cf175393\": container with ID starting with 236b3a0b6b653cbbb841b3d2bc4a4b5fa288b2e3ed97efa1ce46c334cf175393 not found: ID does not exist" containerID="236b3a0b6b653cbbb841b3d2bc4a4b5fa288b2e3ed97efa1ce46c334cf175393" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.798436 4910 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"236b3a0b6b653cbbb841b3d2bc4a4b5fa288b2e3ed97efa1ce46c334cf175393"} err="failed to get container status \"236b3a0b6b653cbbb841b3d2bc4a4b5fa288b2e3ed97efa1ce46c334cf175393\": rpc error: code = NotFound desc = could not find container \"236b3a0b6b653cbbb841b3d2bc4a4b5fa288b2e3ed97efa1ce46c334cf175393\": container with ID starting with 236b3a0b6b653cbbb841b3d2bc4a4b5fa288b2e3ed97efa1ce46c334cf175393 not found: ID does not exist" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.798472 4910 scope.go:117] "RemoveContainer" containerID="cdbfba9fe0e865807ebc0dbe4bd01a9cb1d24ea84c2043d2cc54f3f70da09b15" Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.798985 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdbfba9fe0e865807ebc0dbe4bd01a9cb1d24ea84c2043d2cc54f3f70da09b15\": container with ID starting with cdbfba9fe0e865807ebc0dbe4bd01a9cb1d24ea84c2043d2cc54f3f70da09b15 not found: ID does not exist" containerID="cdbfba9fe0e865807ebc0dbe4bd01a9cb1d24ea84c2043d2cc54f3f70da09b15" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.799022 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdbfba9fe0e865807ebc0dbe4bd01a9cb1d24ea84c2043d2cc54f3f70da09b15"} err="failed to get container status \"cdbfba9fe0e865807ebc0dbe4bd01a9cb1d24ea84c2043d2cc54f3f70da09b15\": rpc error: code = NotFound desc = could not find container \"cdbfba9fe0e865807ebc0dbe4bd01a9cb1d24ea84c2043d2cc54f3f70da09b15\": container with ID starting with cdbfba9fe0e865807ebc0dbe4bd01a9cb1d24ea84c2043d2cc54f3f70da09b15 not found: ID does not exist" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.799062 4910 scope.go:117] "RemoveContainer" containerID="29971467a3dcb3c04d6a710368043b3869f40ed8b96ccb86c936ddf566fc632c" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.821588 4910 scope.go:117] "RemoveContainer" containerID="ab50328fcc6803e3c5b0e3be08e19603836f1c6c27e45818153c2d050ce2fbbc" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.843067 4910 scope.go:117] "RemoveContainer" containerID="772a78573309727ad1a034b6ca1dd4290bfb901add28d3f53cafde3b78a60d2f" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.860659 4910 scope.go:117] "RemoveContainer" containerID="29971467a3dcb3c04d6a710368043b3869f40ed8b96ccb86c936ddf566fc632c" Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.861396 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29971467a3dcb3c04d6a710368043b3869f40ed8b96ccb86c936ddf566fc632c\": container with ID starting with 29971467a3dcb3c04d6a710368043b3869f40ed8b96ccb86c936ddf566fc632c not found: ID does not exist" containerID="29971467a3dcb3c04d6a710368043b3869f40ed8b96ccb86c936ddf566fc632c" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.861450 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29971467a3dcb3c04d6a710368043b3869f40ed8b96ccb86c936ddf566fc632c"} err="failed to get container status \"29971467a3dcb3c04d6a710368043b3869f40ed8b96ccb86c936ddf566fc632c\": rpc error: code = NotFound desc = could not find container \"29971467a3dcb3c04d6a710368043b3869f40ed8b96ccb86c936ddf566fc632c\": container with ID starting with 29971467a3dcb3c04d6a710368043b3869f40ed8b96ccb86c936ddf566fc632c not found: ID does not exist" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.861543 4910 
scope.go:117] "RemoveContainer" containerID="ab50328fcc6803e3c5b0e3be08e19603836f1c6c27e45818153c2d050ce2fbbc" Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.861986 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab50328fcc6803e3c5b0e3be08e19603836f1c6c27e45818153c2d050ce2fbbc\": container with ID starting with ab50328fcc6803e3c5b0e3be08e19603836f1c6c27e45818153c2d050ce2fbbc not found: ID does not exist" containerID="ab50328fcc6803e3c5b0e3be08e19603836f1c6c27e45818153c2d050ce2fbbc" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.862016 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab50328fcc6803e3c5b0e3be08e19603836f1c6c27e45818153c2d050ce2fbbc"} err="failed to get container status \"ab50328fcc6803e3c5b0e3be08e19603836f1c6c27e45818153c2d050ce2fbbc\": rpc error: code = NotFound desc = could not find container \"ab50328fcc6803e3c5b0e3be08e19603836f1c6c27e45818153c2d050ce2fbbc\": container with ID starting with ab50328fcc6803e3c5b0e3be08e19603836f1c6c27e45818153c2d050ce2fbbc not found: ID does not exist" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.862034 4910 scope.go:117] "RemoveContainer" containerID="772a78573309727ad1a034b6ca1dd4290bfb901add28d3f53cafde3b78a60d2f" Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.862626 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"772a78573309727ad1a034b6ca1dd4290bfb901add28d3f53cafde3b78a60d2f\": container with ID starting with 772a78573309727ad1a034b6ca1dd4290bfb901add28d3f53cafde3b78a60d2f not found: ID does not exist" containerID="772a78573309727ad1a034b6ca1dd4290bfb901add28d3f53cafde3b78a60d2f" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.862656 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"772a78573309727ad1a034b6ca1dd4290bfb901add28d3f53cafde3b78a60d2f"} err="failed to get container status \"772a78573309727ad1a034b6ca1dd4290bfb901add28d3f53cafde3b78a60d2f\": rpc error: code = NotFound desc = could not find container \"772a78573309727ad1a034b6ca1dd4290bfb901add28d3f53cafde3b78a60d2f\": container with ID starting with 772a78573309727ad1a034b6ca1dd4290bfb901add28d3f53cafde3b78a60d2f not found: ID does not exist" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.862674 4910 scope.go:117] "RemoveContainer" containerID="bd4e3409bf7d1d3b572fd967c5225bc3dee6e4b1ee3a4eda613979d62a9647ad" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.879149 4910 scope.go:117] "RemoveContainer" containerID="ed035abb419cdb0e866ffb3df08920aead94f7a2e6623e780de7247aaa310095" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.900080 4910 scope.go:117] "RemoveContainer" containerID="5bb650890246204edae60743a4a55732e01aa7d7a93710aa3ba1556b62515f6a" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.919545 4910 scope.go:117] "RemoveContainer" containerID="bd4e3409bf7d1d3b572fd967c5225bc3dee6e4b1ee3a4eda613979d62a9647ad" Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.920207 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd4e3409bf7d1d3b572fd967c5225bc3dee6e4b1ee3a4eda613979d62a9647ad\": container with ID starting with bd4e3409bf7d1d3b572fd967c5225bc3dee6e4b1ee3a4eda613979d62a9647ad not found: ID does not exist" 
containerID="bd4e3409bf7d1d3b572fd967c5225bc3dee6e4b1ee3a4eda613979d62a9647ad" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.920276 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd4e3409bf7d1d3b572fd967c5225bc3dee6e4b1ee3a4eda613979d62a9647ad"} err="failed to get container status \"bd4e3409bf7d1d3b572fd967c5225bc3dee6e4b1ee3a4eda613979d62a9647ad\": rpc error: code = NotFound desc = could not find container \"bd4e3409bf7d1d3b572fd967c5225bc3dee6e4b1ee3a4eda613979d62a9647ad\": container with ID starting with bd4e3409bf7d1d3b572fd967c5225bc3dee6e4b1ee3a4eda613979d62a9647ad not found: ID does not exist" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.920320 4910 scope.go:117] "RemoveContainer" containerID="ed035abb419cdb0e866ffb3df08920aead94f7a2e6623e780de7247aaa310095" Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.921090 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed035abb419cdb0e866ffb3df08920aead94f7a2e6623e780de7247aaa310095\": container with ID starting with ed035abb419cdb0e866ffb3df08920aead94f7a2e6623e780de7247aaa310095 not found: ID does not exist" containerID="ed035abb419cdb0e866ffb3df08920aead94f7a2e6623e780de7247aaa310095" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.921165 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed035abb419cdb0e866ffb3df08920aead94f7a2e6623e780de7247aaa310095"} err="failed to get container status \"ed035abb419cdb0e866ffb3df08920aead94f7a2e6623e780de7247aaa310095\": rpc error: code = NotFound desc = could not find container \"ed035abb419cdb0e866ffb3df08920aead94f7a2e6623e780de7247aaa310095\": container with ID starting with ed035abb419cdb0e866ffb3df08920aead94f7a2e6623e780de7247aaa310095 not found: ID does not exist" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.921207 4910 scope.go:117] "RemoveContainer" containerID="5bb650890246204edae60743a4a55732e01aa7d7a93710aa3ba1556b62515f6a" Jan 05 21:56:01 crc kubenswrapper[4910]: E0105 21:56:01.921738 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bb650890246204edae60743a4a55732e01aa7d7a93710aa3ba1556b62515f6a\": container with ID starting with 5bb650890246204edae60743a4a55732e01aa7d7a93710aa3ba1556b62515f6a not found: ID does not exist" containerID="5bb650890246204edae60743a4a55732e01aa7d7a93710aa3ba1556b62515f6a" Jan 05 21:56:01 crc kubenswrapper[4910]: I0105 21:56:01.921779 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bb650890246204edae60743a4a55732e01aa7d7a93710aa3ba1556b62515f6a"} err="failed to get container status \"5bb650890246204edae60743a4a55732e01aa7d7a93710aa3ba1556b62515f6a\": rpc error: code = NotFound desc = could not find container \"5bb650890246204edae60743a4a55732e01aa7d7a93710aa3ba1556b62515f6a\": container with ID starting with 5bb650890246204edae60743a4a55732e01aa7d7a93710aa3ba1556b62515f6a not found: ID does not exist" Jan 05 21:56:02 crc kubenswrapper[4910]: I0105 21:56:02.730700 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" path="/var/lib/kubelet/pods/060b3be3-5d9d-47dc-a01e-7a79aa9f13b4/volumes" Jan 05 21:56:02 crc kubenswrapper[4910]: I0105 21:56:02.732758 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="340fecda-72dc-4870-887a-29b5ef58ae94" path="/var/lib/kubelet/pods/340fecda-72dc-4870-887a-29b5ef58ae94/volumes" Jan 05 21:56:02 crc kubenswrapper[4910]: I0105 21:56:02.734435 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df73d562-aee4-4b56-b241-bd31f5c95714" path="/var/lib/kubelet/pods/df73d562-aee4-4b56-b241-bd31f5c95714/volumes" Jan 05 21:56:02 crc kubenswrapper[4910]: I0105 21:56:02.736424 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" path="/var/lib/kubelet/pods/e67293c9-fc75-468d-b1c5-c09f9ad46dda/volumes" Jan 05 21:56:02 crc kubenswrapper[4910]: I0105 21:56:02.737657 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" path="/var/lib/kubelet/pods/e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e/volumes" Jan 05 21:56:02 crc kubenswrapper[4910]: I0105 21:56:02.739255 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 05 21:56:02 crc kubenswrapper[4910]: I0105 21:56:02.739961 4910 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Jan 05 21:56:02 crc kubenswrapper[4910]: I0105 21:56:02.750985 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 05 21:56:02 crc kubenswrapper[4910]: I0105 21:56:02.751032 4910 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="9ba87011-e834-4d40-9a99-497f76b8dcf3" Jan 05 21:56:02 crc kubenswrapper[4910]: I0105 21:56:02.754519 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 05 21:56:02 crc kubenswrapper[4910]: I0105 21:56:02.754559 4910 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="9ba87011-e834-4d40-9a99-497f76b8dcf3" Jan 05 21:56:21 crc kubenswrapper[4910]: I0105 21:56:21.732157 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 05 21:56:26 crc kubenswrapper[4910]: I0105 21:56:26.717767 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 05 21:56:26 crc kubenswrapper[4910]: I0105 21:56:26.721577 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 05 21:56:26 crc kubenswrapper[4910]: I0105 21:56:26.721627 4910 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="199c08933606db3a7ffebc792f9dded017d835f8bece24f165885d1d85b2e554" exitCode=137 Jan 05 21:56:26 crc kubenswrapper[4910]: I0105 21:56:26.731648 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"199c08933606db3a7ffebc792f9dded017d835f8bece24f165885d1d85b2e554"} Jan 05 21:56:26 crc kubenswrapper[4910]: I0105 21:56:26.731699 4910 scope.go:117] 
"RemoveContainer" containerID="02a80a102f06c71cfb7a348383b8f1df6a47f7939ddbfb69a0a2fba9bab2fa7e" Jan 05 21:56:27 crc kubenswrapper[4910]: I0105 21:56:27.730972 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 05 21:56:27 crc kubenswrapper[4910]: I0105 21:56:27.732364 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"41b6775af6adfe856580a4d97a8614ab3bc90a65bea9bd0b9e99f33ac8a6f0c7"} Jan 05 21:56:36 crc kubenswrapper[4910]: I0105 21:56:36.026591 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:56:36 crc kubenswrapper[4910]: I0105 21:56:36.466365 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:56:36 crc kubenswrapper[4910]: I0105 21:56:36.470538 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:56:37 crc kubenswrapper[4910]: I0105 21:56:37.799064 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.604736 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zq4vl"] Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605562 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" containerName="extract-content" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605574 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" containerName="extract-content" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605587 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="340fecda-72dc-4870-887a-29b5ef58ae94" containerName="extract-content" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605593 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="340fecda-72dc-4870-887a-29b5ef58ae94" containerName="extract-content" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605603 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" containerName="registry-server" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605609 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" containerName="registry-server" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605617 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff686cfa-03a7-4c78-8efc-17407e5e79c0" containerName="installer" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605623 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff686cfa-03a7-4c78-8efc-17407e5e79c0" containerName="installer" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605632 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="340fecda-72dc-4870-887a-29b5ef58ae94" containerName="extract-utilities" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605638 4910 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="340fecda-72dc-4870-887a-29b5ef58ae94" containerName="extract-utilities" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605645 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" containerName="extract-content" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605651 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" containerName="extract-content" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605658 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" containerName="extract-content" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605664 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" containerName="extract-content" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605672 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" containerName="extract-utilities" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605702 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" containerName="extract-utilities" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605712 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" containerName="extract-utilities" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605717 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" containerName="extract-utilities" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605724 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" containerName="registry-server" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605730 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" containerName="registry-server" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605736 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" containerName="registry-server" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605742 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" containerName="registry-server" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605749 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="340fecda-72dc-4870-887a-29b5ef58ae94" containerName="registry-server" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605754 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="340fecda-72dc-4870-887a-29b5ef58ae94" containerName="registry-server" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605764 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df73d562-aee4-4b56-b241-bd31f5c95714" containerName="marketplace-operator" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605770 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="df73d562-aee4-4b56-b241-bd31f5c95714" containerName="marketplace-operator" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605780 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" containerName="extract-utilities" Jan 05 21:56:43 crc kubenswrapper[4910]: 
I0105 21:56:43.605786 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" containerName="extract-utilities" Jan 05 21:56:43 crc kubenswrapper[4910]: E0105 21:56:43.605814 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605820 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605901 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e67293c9-fc75-468d-b1c5-c09f9ad46dda" containerName="registry-server" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605914 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff686cfa-03a7-4c78-8efc-17407e5e79c0" containerName="installer" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605923 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605930 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="340fecda-72dc-4870-887a-29b5ef58ae94" containerName="registry-server" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605939 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="df73d562-aee4-4b56-b241-bd31f5c95714" containerName="marketplace-operator" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605948 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="060b3be3-5d9d-47dc-a01e-7a79aa9f13b4" containerName="registry-server" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.605956 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8eb66cc-3e51-4bdd-ab74-6e19c8c9737e" containerName="registry-server" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.606359 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.616570 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.617076 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.617261 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.618731 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.629534 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zq4vl"] Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.629631 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.684829 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ql68x\" (UniqueName: \"kubernetes.io/projected/9e1a2196-6cd9-49e7-88b2-4e886ce030b4-kube-api-access-ql68x\") pod \"marketplace-operator-79b997595-zq4vl\" (UID: \"9e1a2196-6cd9-49e7-88b2-4e886ce030b4\") " pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.684886 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9e1a2196-6cd9-49e7-88b2-4e886ce030b4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zq4vl\" (UID: \"9e1a2196-6cd9-49e7-88b2-4e886ce030b4\") " pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.684945 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9e1a2196-6cd9-49e7-88b2-4e886ce030b4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zq4vl\" (UID: \"9e1a2196-6cd9-49e7-88b2-4e886ce030b4\") " pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.731073 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-87487f679-vbkmz"] Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.731390 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" podUID="8730029b-50dc-4410-bfa7-c76b57ac41be" containerName="controller-manager" containerID="cri-o://6ca020f6f3db27964bad9ad04d394685c2d585d97a3b456341ac79ca61388798" gracePeriod=30 Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.742627 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz"] Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.742885 4910 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" podUID="72caeca9-12a4-49ea-bacf-9aaa07f625e5" containerName="route-controller-manager" containerID="cri-o://3a8286f6e1c7db354f72e1df7e2160f4986b3cd70177a35567ed677e4ce75423" gracePeriod=30 Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.786417 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9e1a2196-6cd9-49e7-88b2-4e886ce030b4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zq4vl\" (UID: \"9e1a2196-6cd9-49e7-88b2-4e886ce030b4\") " pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.786581 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ql68x\" (UniqueName: \"kubernetes.io/projected/9e1a2196-6cd9-49e7-88b2-4e886ce030b4-kube-api-access-ql68x\") pod \"marketplace-operator-79b997595-zq4vl\" (UID: \"9e1a2196-6cd9-49e7-88b2-4e886ce030b4\") " pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.786631 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9e1a2196-6cd9-49e7-88b2-4e886ce030b4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zq4vl\" (UID: \"9e1a2196-6cd9-49e7-88b2-4e886ce030b4\") " pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.788450 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9e1a2196-6cd9-49e7-88b2-4e886ce030b4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zq4vl\" (UID: \"9e1a2196-6cd9-49e7-88b2-4e886ce030b4\") " pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.794262 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9e1a2196-6cd9-49e7-88b2-4e886ce030b4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zq4vl\" (UID: \"9e1a2196-6cd9-49e7-88b2-4e886ce030b4\") " pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.811015 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ql68x\" (UniqueName: \"kubernetes.io/projected/9e1a2196-6cd9-49e7-88b2-4e886ce030b4-kube-api-access-ql68x\") pod \"marketplace-operator-79b997595-zq4vl\" (UID: \"9e1a2196-6cd9-49e7-88b2-4e886ce030b4\") " pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" Jan 05 21:56:43 crc kubenswrapper[4910]: I0105 21:56:43.924087 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.135955 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.166465 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.193914 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72caeca9-12a4-49ea-bacf-9aaa07f625e5-serving-cert\") pod \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.193960 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-client-ca\") pod \"8730029b-50dc-4410-bfa7-c76b57ac41be\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.194002 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78mw6\" (UniqueName: \"kubernetes.io/projected/8730029b-50dc-4410-bfa7-c76b57ac41be-kube-api-access-78mw6\") pod \"8730029b-50dc-4410-bfa7-c76b57ac41be\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.194034 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-proxy-ca-bundles\") pod \"8730029b-50dc-4410-bfa7-c76b57ac41be\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.194061 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-config\") pod \"8730029b-50dc-4410-bfa7-c76b57ac41be\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.194084 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/72caeca9-12a4-49ea-bacf-9aaa07f625e5-client-ca\") pod \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.194129 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72caeca9-12a4-49ea-bacf-9aaa07f625e5-config\") pod \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.194154 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8730029b-50dc-4410-bfa7-c76b57ac41be-serving-cert\") pod \"8730029b-50dc-4410-bfa7-c76b57ac41be\" (UID: \"8730029b-50dc-4410-bfa7-c76b57ac41be\") " Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.194168 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdk42\" (UniqueName: \"kubernetes.io/projected/72caeca9-12a4-49ea-bacf-9aaa07f625e5-kube-api-access-tdk42\") pod \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\" (UID: \"72caeca9-12a4-49ea-bacf-9aaa07f625e5\") " Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.195480 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-client-ca" (OuterVolumeSpecName: "client-ca") pod "8730029b-50dc-4410-bfa7-c76b57ac41be" (UID: 
"8730029b-50dc-4410-bfa7-c76b57ac41be"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.195520 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "8730029b-50dc-4410-bfa7-c76b57ac41be" (UID: "8730029b-50dc-4410-bfa7-c76b57ac41be"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.195705 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-config" (OuterVolumeSpecName: "config") pod "8730029b-50dc-4410-bfa7-c76b57ac41be" (UID: "8730029b-50dc-4410-bfa7-c76b57ac41be"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.197699 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72caeca9-12a4-49ea-bacf-9aaa07f625e5-client-ca" (OuterVolumeSpecName: "client-ca") pod "72caeca9-12a4-49ea-bacf-9aaa07f625e5" (UID: "72caeca9-12a4-49ea-bacf-9aaa07f625e5"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.197807 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72caeca9-12a4-49ea-bacf-9aaa07f625e5-config" (OuterVolumeSpecName: "config") pod "72caeca9-12a4-49ea-bacf-9aaa07f625e5" (UID: "72caeca9-12a4-49ea-bacf-9aaa07f625e5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.199294 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72caeca9-12a4-49ea-bacf-9aaa07f625e5-kube-api-access-tdk42" (OuterVolumeSpecName: "kube-api-access-tdk42") pod "72caeca9-12a4-49ea-bacf-9aaa07f625e5" (UID: "72caeca9-12a4-49ea-bacf-9aaa07f625e5"). InnerVolumeSpecName "kube-api-access-tdk42". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.199434 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8730029b-50dc-4410-bfa7-c76b57ac41be-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8730029b-50dc-4410-bfa7-c76b57ac41be" (UID: "8730029b-50dc-4410-bfa7-c76b57ac41be"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.199732 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8730029b-50dc-4410-bfa7-c76b57ac41be-kube-api-access-78mw6" (OuterVolumeSpecName: "kube-api-access-78mw6") pod "8730029b-50dc-4410-bfa7-c76b57ac41be" (UID: "8730029b-50dc-4410-bfa7-c76b57ac41be"). InnerVolumeSpecName "kube-api-access-78mw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.202235 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72caeca9-12a4-49ea-bacf-9aaa07f625e5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "72caeca9-12a4-49ea-bacf-9aaa07f625e5" (UID: "72caeca9-12a4-49ea-bacf-9aaa07f625e5"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.295859 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72caeca9-12a4-49ea-bacf-9aaa07f625e5-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.295895 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8730029b-50dc-4410-bfa7-c76b57ac41be-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.295910 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdk42\" (UniqueName: \"kubernetes.io/projected/72caeca9-12a4-49ea-bacf-9aaa07f625e5-kube-api-access-tdk42\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.295920 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72caeca9-12a4-49ea-bacf-9aaa07f625e5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.295928 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-client-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.295936 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78mw6\" (UniqueName: \"kubernetes.io/projected/8730029b-50dc-4410-bfa7-c76b57ac41be-kube-api-access-78mw6\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.295945 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.295954 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8730029b-50dc-4410-bfa7-c76b57ac41be-config\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.295964 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/72caeca9-12a4-49ea-bacf-9aaa07f625e5-client-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.347515 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zq4vl"] Jan 05 21:56:44 crc kubenswrapper[4910]: W0105 21:56:44.356137 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e1a2196_6cd9_49e7_88b2_4e886ce030b4.slice/crio-fdddcf29bcaf1322fcf33c4541575011d1505c321fdbad2276851f6447d79b7c WatchSource:0}: Error finding container fdddcf29bcaf1322fcf33c4541575011d1505c321fdbad2276851f6447d79b7c: Status 404 returned error can't find the container with id fdddcf29bcaf1322fcf33c4541575011d1505c321fdbad2276851f6447d79b7c Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.832902 4910 generic.go:334] "Generic (PLEG): container finished" podID="72caeca9-12a4-49ea-bacf-9aaa07f625e5" containerID="3a8286f6e1c7db354f72e1df7e2160f4986b3cd70177a35567ed677e4ce75423" exitCode=0 Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.833002 4910 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz"
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.833042 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" event={"ID":"72caeca9-12a4-49ea-bacf-9aaa07f625e5","Type":"ContainerDied","Data":"3a8286f6e1c7db354f72e1df7e2160f4986b3cd70177a35567ed677e4ce75423"}
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.833093 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz" event={"ID":"72caeca9-12a4-49ea-bacf-9aaa07f625e5","Type":"ContainerDied","Data":"aa60563c72ca22000ba77f2d043450cb1c90808ec0b46609bdba168071c8826e"}
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.833130 4910 scope.go:117] "RemoveContainer" containerID="3a8286f6e1c7db354f72e1df7e2160f4986b3cd70177a35567ed677e4ce75423"
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.834254 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" event={"ID":"9e1a2196-6cd9-49e7-88b2-4e886ce030b4","Type":"ContainerStarted","Data":"8a1f1e4237f2d0857c68428183c51347bbcc1aeb7afc8a2ddaf8f20916b68e94"}
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.834281 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" event={"ID":"9e1a2196-6cd9-49e7-88b2-4e886ce030b4","Type":"ContainerStarted","Data":"fdddcf29bcaf1322fcf33c4541575011d1505c321fdbad2276851f6447d79b7c"}
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.834756 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl"
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.838739 4910 generic.go:334] "Generic (PLEG): container finished" podID="8730029b-50dc-4410-bfa7-c76b57ac41be" containerID="6ca020f6f3db27964bad9ad04d394685c2d585d97a3b456341ac79ca61388798" exitCode=0
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.838786 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" event={"ID":"8730029b-50dc-4410-bfa7-c76b57ac41be","Type":"ContainerDied","Data":"6ca020f6f3db27964bad9ad04d394685c2d585d97a3b456341ac79ca61388798"}
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.838815 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-87487f679-vbkmz" event={"ID":"8730029b-50dc-4410-bfa7-c76b57ac41be","Type":"ContainerDied","Data":"a0bd6cf942bb0f5f6ae3df2083af77ac0266e75ef22b3a426e4a1863b60f8dbd"}
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.838822 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-87487f679-vbkmz"
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.850663 4910 scope.go:117] "RemoveContainer" containerID="3a8286f6e1c7db354f72e1df7e2160f4986b3cd70177a35567ed677e4ce75423"
Jan 05 21:56:44 crc kubenswrapper[4910]: E0105 21:56:44.851203 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a8286f6e1c7db354f72e1df7e2160f4986b3cd70177a35567ed677e4ce75423\": container with ID starting with 3a8286f6e1c7db354f72e1df7e2160f4986b3cd70177a35567ed677e4ce75423 not found: ID does not exist" containerID="3a8286f6e1c7db354f72e1df7e2160f4986b3cd70177a35567ed677e4ce75423"
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.851240 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a8286f6e1c7db354f72e1df7e2160f4986b3cd70177a35567ed677e4ce75423"} err="failed to get container status \"3a8286f6e1c7db354f72e1df7e2160f4986b3cd70177a35567ed677e4ce75423\": rpc error: code = NotFound desc = could not find container \"3a8286f6e1c7db354f72e1df7e2160f4986b3cd70177a35567ed677e4ce75423\": container with ID starting with 3a8286f6e1c7db354f72e1df7e2160f4986b3cd70177a35567ed677e4ce75423 not found: ID does not exist"
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.851263 4910 scope.go:117] "RemoveContainer" containerID="6ca020f6f3db27964bad9ad04d394685c2d585d97a3b456341ac79ca61388798"
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.853065 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl"
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.875396 4910 scope.go:117] "RemoveContainer" containerID="6ca020f6f3db27964bad9ad04d394685c2d585d97a3b456341ac79ca61388798"
Jan 05 21:56:44 crc kubenswrapper[4910]: E0105 21:56:44.876031 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ca020f6f3db27964bad9ad04d394685c2d585d97a3b456341ac79ca61388798\": container with ID starting with 6ca020f6f3db27964bad9ad04d394685c2d585d97a3b456341ac79ca61388798 not found: ID does not exist" containerID="6ca020f6f3db27964bad9ad04d394685c2d585d97a3b456341ac79ca61388798"
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.876081 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ca020f6f3db27964bad9ad04d394685c2d585d97a3b456341ac79ca61388798"} err="failed to get container status \"6ca020f6f3db27964bad9ad04d394685c2d585d97a3b456341ac79ca61388798\": rpc error: code = NotFound desc = could not find container \"6ca020f6f3db27964bad9ad04d394685c2d585d97a3b456341ac79ca61388798\": container with ID starting with 6ca020f6f3db27964bad9ad04d394685c2d585d97a3b456341ac79ca61388798 not found: ID does not exist"
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.879106 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-zq4vl" podStartSLOduration=1.8790813119999998 podStartE2EDuration="1.879081312s" podCreationTimestamp="2026-01-05 21:56:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:56:44.865186944 +0000 UTC m=+336.442684614" watchObservedRunningTime="2026-01-05 21:56:44.879081312 +0000 UTC m=+336.456578982"
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.880817 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz"]
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.884810 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7f475d6877-ww2dz"]
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.888230 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-87487f679-vbkmz"]
Jan 05 21:56:44 crc kubenswrapper[4910]: I0105 21:56:44.891088 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-87487f679-vbkmz"]
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.014780 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"]
Jan 05 21:56:45 crc kubenswrapper[4910]: E0105 21:56:45.015037 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8730029b-50dc-4410-bfa7-c76b57ac41be" containerName="controller-manager"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.015050 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8730029b-50dc-4410-bfa7-c76b57ac41be" containerName="controller-manager"
Jan 05 21:56:45 crc kubenswrapper[4910]: E0105 21:56:45.015068 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72caeca9-12a4-49ea-bacf-9aaa07f625e5" containerName="route-controller-manager"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.015074 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="72caeca9-12a4-49ea-bacf-9aaa07f625e5" containerName="route-controller-manager"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.015201 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="72caeca9-12a4-49ea-bacf-9aaa07f625e5" containerName="route-controller-manager"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.015211 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8730029b-50dc-4410-bfa7-c76b57ac41be" containerName="controller-manager"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.015644 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.017789 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.018772 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.020082 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.020670 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.020843 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.021484 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"]
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.022258 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.023373 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.023727 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.023962 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.024024 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.024277 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.024557 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.024817 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.026432 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"]
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.039091 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"]
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.040568 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.107153 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2fc29a28-fa62-47b4-8a6d-bf4860de462e-serving-cert\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.107556 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-client-ca\") pod \"route-controller-manager-77bbdf4988-ghrc9\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") " pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.107604 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-config\") pod \"route-controller-manager-77bbdf4988-ghrc9\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") " pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.107632 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-serving-cert\") pod \"route-controller-manager-77bbdf4988-ghrc9\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") " pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.107656 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-client-ca\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.107693 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wlx8\" (UniqueName: \"kubernetes.io/projected/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-kube-api-access-5wlx8\") pod \"route-controller-manager-77bbdf4988-ghrc9\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") " pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.107725 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-proxy-ca-bundles\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.107752 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-config\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.107813 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cwcc\" (UniqueName: \"kubernetes.io/projected/2fc29a28-fa62-47b4-8a6d-bf4860de462e-kube-api-access-9cwcc\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.208624 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-config\") pod \"route-controller-manager-77bbdf4988-ghrc9\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") " pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.208670 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-serving-cert\") pod \"route-controller-manager-77bbdf4988-ghrc9\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") " pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.208690 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-client-ca\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.208716 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wlx8\" (UniqueName: \"kubernetes.io/projected/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-kube-api-access-5wlx8\") pod \"route-controller-manager-77bbdf4988-ghrc9\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") " pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.208741 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-proxy-ca-bundles\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.208759 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-config\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.208778 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cwcc\" (UniqueName: \"kubernetes.io/projected/2fc29a28-fa62-47b4-8a6d-bf4860de462e-kube-api-access-9cwcc\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.208820 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2fc29a28-fa62-47b4-8a6d-bf4860de462e-serving-cert\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.209230 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-client-ca\") pod \"route-controller-manager-77bbdf4988-ghrc9\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") " pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.210102 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-client-ca\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.210212 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-client-ca\") pod \"route-controller-manager-77bbdf4988-ghrc9\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") " pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.210562 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-config\") pod \"route-controller-manager-77bbdf4988-ghrc9\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") " pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.210599 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-config\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.211051 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-proxy-ca-bundles\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.218632 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-serving-cert\") pod \"route-controller-manager-77bbdf4988-ghrc9\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") " pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.221818 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2fc29a28-fa62-47b4-8a6d-bf4860de462e-serving-cert\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.225791 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cwcc\" (UniqueName: \"kubernetes.io/projected/2fc29a28-fa62-47b4-8a6d-bf4860de462e-kube-api-access-9cwcc\") pod \"controller-manager-64f4f8899d-s2ccg\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") " pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.233317 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wlx8\" (UniqueName: \"kubernetes.io/projected/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-kube-api-access-5wlx8\") pod \"route-controller-manager-77bbdf4988-ghrc9\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") " pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.377823 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.388467 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.571156 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"]
Jan 05 21:56:45 crc kubenswrapper[4910]: W0105 21:56:45.582091 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod46c1b1b6_4117_47d1_ae0f_5a00e9160b96.slice/crio-007dfcbaa4fa9fe9603b7876e9dbe4bf4f736c363506eafd8a11bcfb9adf2eee WatchSource:0}: Error finding container 007dfcbaa4fa9fe9603b7876e9dbe4bf4f736c363506eafd8a11bcfb9adf2eee: Status 404 returned error can't find the container with id 007dfcbaa4fa9fe9603b7876e9dbe4bf4f736c363506eafd8a11bcfb9adf2eee
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.619761 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"]
Jan 05 21:56:45 crc kubenswrapper[4910]: W0105 21:56:45.629089 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2fc29a28_fa62_47b4_8a6d_bf4860de462e.slice/crio-c7ee2845c13b1999be95f7f546ee0726fb0d5615bfd34c9baa546329798779ef WatchSource:0}: Error finding container c7ee2845c13b1999be95f7f546ee0726fb0d5615bfd34c9baa546329798779ef: Status 404 returned error can't find the container with id c7ee2845c13b1999be95f7f546ee0726fb0d5615bfd34c9baa546329798779ef
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.846250 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg" event={"ID":"2fc29a28-fa62-47b4-8a6d-bf4860de462e","Type":"ContainerStarted","Data":"b81bca0b8443cc9c98530fda7a1b4a2818271416709cdb0a79d2152658f12443"}
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.847332 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg" event={"ID":"2fc29a28-fa62-47b4-8a6d-bf4860de462e","Type":"ContainerStarted","Data":"c7ee2845c13b1999be95f7f546ee0726fb0d5615bfd34c9baa546329798779ef"}
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.847417 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.849997 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9" event={"ID":"46c1b1b6-4117-47d1-ae0f-5a00e9160b96","Type":"ContainerStarted","Data":"ec15eda6f0fcc86b792859ef661468f7d73184669d20cf674f855bb6436b8152"}
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.850041 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9" event={"ID":"46c1b1b6-4117-47d1-ae0f-5a00e9160b96","Type":"ContainerStarted","Data":"007dfcbaa4fa9fe9603b7876e9dbe4bf4f736c363506eafd8a11bcfb9adf2eee"}
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.850270 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.856149 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.880795 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg" podStartSLOduration=2.880776371 podStartE2EDuration="2.880776371s" podCreationTimestamp="2026-01-05 21:56:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:56:45.865640861 +0000 UTC m=+337.443138541" watchObservedRunningTime="2026-01-05 21:56:45.880776371 +0000 UTC m=+337.458274041"
Jan 05 21:56:45 crc kubenswrapper[4910]: I0105 21:56:45.897696 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9" podStartSLOduration=2.897682606 podStartE2EDuration="2.897682606s" podCreationTimestamp="2026-01-05 21:56:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:56:45.881435658 +0000 UTC m=+337.458933348" watchObservedRunningTime="2026-01-05 21:56:45.897682606 +0000 UTC m=+337.475180276"
Jan 05 21:56:46 crc kubenswrapper[4910]: I0105 21:56:46.279713 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:56:46 crc kubenswrapper[4910]: I0105 21:56:46.733154 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72caeca9-12a4-49ea-bacf-9aaa07f625e5" path="/var/lib/kubelet/pods/72caeca9-12a4-49ea-bacf-9aaa07f625e5/volumes"
Jan 05 21:56:46 crc kubenswrapper[4910]: I0105 21:56:46.734247 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8730029b-50dc-4410-bfa7-c76b57ac41be" path="/var/lib/kubelet/pods/8730029b-50dc-4410-bfa7-c76b57ac41be/volumes"
Jan 05 21:57:10 crc kubenswrapper[4910]: I0105 21:57:10.952878 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 05 21:57:10 crc kubenswrapper[4910]: I0105 21:57:10.953448 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.638508 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-swkg7"]
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.640150 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-swkg7"
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.647023 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.667068 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-swkg7"]
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.732798 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4145136a-da1b-4e59-bd3a-b7565fc66443-utilities\") pod \"redhat-marketplace-swkg7\" (UID: \"4145136a-da1b-4e59-bd3a-b7565fc66443\") " pod="openshift-marketplace/redhat-marketplace-swkg7"
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.732840 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4145136a-da1b-4e59-bd3a-b7565fc66443-catalog-content\") pod \"redhat-marketplace-swkg7\" (UID: \"4145136a-da1b-4e59-bd3a-b7565fc66443\") " pod="openshift-marketplace/redhat-marketplace-swkg7"
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.732891 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82hc6\" (UniqueName: \"kubernetes.io/projected/4145136a-da1b-4e59-bd3a-b7565fc66443-kube-api-access-82hc6\") pod \"redhat-marketplace-swkg7\" (UID: \"4145136a-da1b-4e59-bd3a-b7565fc66443\") " pod="openshift-marketplace/redhat-marketplace-swkg7"
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.833933 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4145136a-da1b-4e59-bd3a-b7565fc66443-catalog-content\") pod \"redhat-marketplace-swkg7\" (UID: \"4145136a-da1b-4e59-bd3a-b7565fc66443\") " pod="openshift-marketplace/redhat-marketplace-swkg7"
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.834018 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82hc6\" (UniqueName: \"kubernetes.io/projected/4145136a-da1b-4e59-bd3a-b7565fc66443-kube-api-access-82hc6\") pod \"redhat-marketplace-swkg7\" (UID: \"4145136a-da1b-4e59-bd3a-b7565fc66443\") " pod="openshift-marketplace/redhat-marketplace-swkg7"
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.834064 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4145136a-da1b-4e59-bd3a-b7565fc66443-utilities\") pod \"redhat-marketplace-swkg7\" (UID: \"4145136a-da1b-4e59-bd3a-b7565fc66443\") " pod="openshift-marketplace/redhat-marketplace-swkg7"
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.834617 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4145136a-da1b-4e59-bd3a-b7565fc66443-utilities\") pod \"redhat-marketplace-swkg7\" (UID: \"4145136a-da1b-4e59-bd3a-b7565fc66443\") " pod="openshift-marketplace/redhat-marketplace-swkg7"
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.834607 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4145136a-da1b-4e59-bd3a-b7565fc66443-catalog-content\") pod \"redhat-marketplace-swkg7\" (UID: \"4145136a-da1b-4e59-bd3a-b7565fc66443\") " pod="openshift-marketplace/redhat-marketplace-swkg7"
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.852491 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82hc6\" (UniqueName: \"kubernetes.io/projected/4145136a-da1b-4e59-bd3a-b7565fc66443-kube-api-access-82hc6\") pod \"redhat-marketplace-swkg7\" (UID: \"4145136a-da1b-4e59-bd3a-b7565fc66443\") " pod="openshift-marketplace/redhat-marketplace-swkg7"
Jan 05 21:57:16 crc kubenswrapper[4910]: I0105 21:57:16.956656 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-swkg7"
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.388652 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-swkg7"]
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.638416 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lc2hp"]
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.639475 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lc2hp"
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.649881 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.655989 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lc2hp"]
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.746092 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szhnx\" (UniqueName: \"kubernetes.io/projected/61bfc4cb-601d-4bbc-8820-59f6f8de1c63-kube-api-access-szhnx\") pod \"redhat-operators-lc2hp\" (UID: \"61bfc4cb-601d-4bbc-8820-59f6f8de1c63\") " pod="openshift-marketplace/redhat-operators-lc2hp"
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.746180 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61bfc4cb-601d-4bbc-8820-59f6f8de1c63-catalog-content\") pod \"redhat-operators-lc2hp\" (UID: \"61bfc4cb-601d-4bbc-8820-59f6f8de1c63\") " pod="openshift-marketplace/redhat-operators-lc2hp"
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.746314 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61bfc4cb-601d-4bbc-8820-59f6f8de1c63-utilities\") pod \"redhat-operators-lc2hp\" (UID: \"61bfc4cb-601d-4bbc-8820-59f6f8de1c63\") " pod="openshift-marketplace/redhat-operators-lc2hp"
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.847782 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szhnx\" (UniqueName: \"kubernetes.io/projected/61bfc4cb-601d-4bbc-8820-59f6f8de1c63-kube-api-access-szhnx\") pod \"redhat-operators-lc2hp\" (UID: \"61bfc4cb-601d-4bbc-8820-59f6f8de1c63\") " pod="openshift-marketplace/redhat-operators-lc2hp"
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.847870 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61bfc4cb-601d-4bbc-8820-59f6f8de1c63-catalog-content\") pod \"redhat-operators-lc2hp\" (UID: \"61bfc4cb-601d-4bbc-8820-59f6f8de1c63\") " pod="openshift-marketplace/redhat-operators-lc2hp"
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.847924 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61bfc4cb-601d-4bbc-8820-59f6f8de1c63-utilities\") pod \"redhat-operators-lc2hp\" (UID: \"61bfc4cb-601d-4bbc-8820-59f6f8de1c63\") " pod="openshift-marketplace/redhat-operators-lc2hp"
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.848465 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61bfc4cb-601d-4bbc-8820-59f6f8de1c63-catalog-content\") pod \"redhat-operators-lc2hp\" (UID: \"61bfc4cb-601d-4bbc-8820-59f6f8de1c63\") " pod="openshift-marketplace/redhat-operators-lc2hp"
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.848626 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61bfc4cb-601d-4bbc-8820-59f6f8de1c63-utilities\") pod \"redhat-operators-lc2hp\" (UID: \"61bfc4cb-601d-4bbc-8820-59f6f8de1c63\") " pod="openshift-marketplace/redhat-operators-lc2hp"
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.871658 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szhnx\" (UniqueName: \"kubernetes.io/projected/61bfc4cb-601d-4bbc-8820-59f6f8de1c63-kube-api-access-szhnx\") pod \"redhat-operators-lc2hp\" (UID: \"61bfc4cb-601d-4bbc-8820-59f6f8de1c63\") " pod="openshift-marketplace/redhat-operators-lc2hp"
Jan 05 21:57:17 crc kubenswrapper[4910]: I0105 21:57:17.955824 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lc2hp"
Jan 05 21:57:18 crc kubenswrapper[4910]: I0105 21:57:18.104514 4910 generic.go:334] "Generic (PLEG): container finished" podID="4145136a-da1b-4e59-bd3a-b7565fc66443" containerID="43fd0aded1d3d00e008cb379c8e2fa0c3574435dcc208c23dc461cfc520e99e4" exitCode=0
Jan 05 21:57:18 crc kubenswrapper[4910]: I0105 21:57:18.104588 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-swkg7" event={"ID":"4145136a-da1b-4e59-bd3a-b7565fc66443","Type":"ContainerDied","Data":"43fd0aded1d3d00e008cb379c8e2fa0c3574435dcc208c23dc461cfc520e99e4"}
Jan 05 21:57:18 crc kubenswrapper[4910]: I0105 21:57:18.104673 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-swkg7" event={"ID":"4145136a-da1b-4e59-bd3a-b7565fc66443","Type":"ContainerStarted","Data":"8e6a9c15068acf10dbf0479aa7ee2c0862d33618bd4621839ee226b69f40fc47"}
Jan 05 21:57:18 crc kubenswrapper[4910]: I0105 21:57:18.440876 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lc2hp"]
Jan 05 21:57:18 crc kubenswrapper[4910]: W0105 21:57:18.441803 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod61bfc4cb_601d_4bbc_8820_59f6f8de1c63.slice/crio-a5fd094d2da832eb2f414520a68bf0106bcd976818d70aa28e98f9e05c776279 WatchSource:0}: Error finding container a5fd094d2da832eb2f414520a68bf0106bcd976818d70aa28e98f9e05c776279: Status 404 returned error can't find the container with id a5fd094d2da832eb2f414520a68bf0106bcd976818d70aa28e98f9e05c776279
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.111317 4910 generic.go:334] "Generic (PLEG): container finished" podID="4145136a-da1b-4e59-bd3a-b7565fc66443" containerID="e15000226da5cc5f106f9d248a6fea20a2616a8bd29d8bc8b6f27b13fcaeb164" exitCode=0
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.111559 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-swkg7" event={"ID":"4145136a-da1b-4e59-bd3a-b7565fc66443","Type":"ContainerDied","Data":"e15000226da5cc5f106f9d248a6fea20a2616a8bd29d8bc8b6f27b13fcaeb164"}
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.115003 4910 generic.go:334] "Generic (PLEG): container finished" podID="61bfc4cb-601d-4bbc-8820-59f6f8de1c63" containerID="5b7dea602272372283ead677046d9168d7852fb1b9c96fa899a1f10f77c7dd8f" exitCode=0
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.115513 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lc2hp" event={"ID":"61bfc4cb-601d-4bbc-8820-59f6f8de1c63","Type":"ContainerDied","Data":"5b7dea602272372283ead677046d9168d7852fb1b9c96fa899a1f10f77c7dd8f"}
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.115559 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lc2hp" event={"ID":"61bfc4cb-601d-4bbc-8820-59f6f8de1c63","Type":"ContainerStarted","Data":"a5fd094d2da832eb2f414520a68bf0106bcd976818d70aa28e98f9e05c776279"}
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.237532 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gj6th"]
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.238773 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gj6th"
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.244135 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.247446 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gj6th"]
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.379141 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36f587d4-ab14-4c64-9fe6-fd09211dd62c-catalog-content\") pod \"certified-operators-gj6th\" (UID: \"36f587d4-ab14-4c64-9fe6-fd09211dd62c\") " pod="openshift-marketplace/certified-operators-gj6th"
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.379530 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36f587d4-ab14-4c64-9fe6-fd09211dd62c-utilities\") pod \"certified-operators-gj6th\" (UID: \"36f587d4-ab14-4c64-9fe6-fd09211dd62c\") " pod="openshift-marketplace/certified-operators-gj6th"
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.379664 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r9sf\" (UniqueName: \"kubernetes.io/projected/36f587d4-ab14-4c64-9fe6-fd09211dd62c-kube-api-access-5r9sf\") pod \"certified-operators-gj6th\" (UID: \"36f587d4-ab14-4c64-9fe6-fd09211dd62c\") " pod="openshift-marketplace/certified-operators-gj6th"
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.481067 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36f587d4-ab14-4c64-9fe6-fd09211dd62c-catalog-content\") pod \"certified-operators-gj6th\" (UID: \"36f587d4-ab14-4c64-9fe6-fd09211dd62c\") " pod="openshift-marketplace/certified-operators-gj6th"
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.481139 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36f587d4-ab14-4c64-9fe6-fd09211dd62c-utilities\") pod \"certified-operators-gj6th\" (UID: \"36f587d4-ab14-4c64-9fe6-fd09211dd62c\") " pod="openshift-marketplace/certified-operators-gj6th"
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.481199 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r9sf\" (UniqueName: \"kubernetes.io/projected/36f587d4-ab14-4c64-9fe6-fd09211dd62c-kube-api-access-5r9sf\") pod \"certified-operators-gj6th\" (UID: \"36f587d4-ab14-4c64-9fe6-fd09211dd62c\") " pod="openshift-marketplace/certified-operators-gj6th"
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.481702 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36f587d4-ab14-4c64-9fe6-fd09211dd62c-catalog-content\") pod \"certified-operators-gj6th\" (UID: \"36f587d4-ab14-4c64-9fe6-fd09211dd62c\") " pod="openshift-marketplace/certified-operators-gj6th"
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.484075 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36f587d4-ab14-4c64-9fe6-fd09211dd62c-utilities\") pod \"certified-operators-gj6th\" (UID: \"36f587d4-ab14-4c64-9fe6-fd09211dd62c\") " pod="openshift-marketplace/certified-operators-gj6th"
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.507398 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r9sf\" (UniqueName: \"kubernetes.io/projected/36f587d4-ab14-4c64-9fe6-fd09211dd62c-kube-api-access-5r9sf\") pod \"certified-operators-gj6th\" (UID: \"36f587d4-ab14-4c64-9fe6-fd09211dd62c\") " pod="openshift-marketplace/certified-operators-gj6th"
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.511015 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"]
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.512768 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg" podUID="2fc29a28-fa62-47b4-8a6d-bf4860de462e" containerName="controller-manager" containerID="cri-o://b81bca0b8443cc9c98530fda7a1b4a2818271416709cdb0a79d2152658f12443" gracePeriod=30
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.595626 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gj6th"
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.596660 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"]
Jan 05 21:57:19 crc kubenswrapper[4910]: I0105 21:57:19.596938 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9" podUID="46c1b1b6-4117-47d1-ae0f-5a00e9160b96" containerName="route-controller-manager" containerID="cri-o://ec15eda6f0fcc86b792859ef661468f7d73184669d20cf674f855bb6436b8152" gracePeriod=30
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.018096 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.071212 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.127862 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lc2hp" event={"ID":"61bfc4cb-601d-4bbc-8820-59f6f8de1c63","Type":"ContainerStarted","Data":"0462492afc9fae44ee735a0c6e805789f3d725b587b2b51101ce7716ec5fc413"}
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.132262 4910 generic.go:334] "Generic (PLEG): container finished" podID="2fc29a28-fa62-47b4-8a6d-bf4860de462e" containerID="b81bca0b8443cc9c98530fda7a1b4a2818271416709cdb0a79d2152658f12443" exitCode=0
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.132343 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg" event={"ID":"2fc29a28-fa62-47b4-8a6d-bf4860de462e","Type":"ContainerDied","Data":"b81bca0b8443cc9c98530fda7a1b4a2818271416709cdb0a79d2152658f12443"}
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.132385 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg" event={"ID":"2fc29a28-fa62-47b4-8a6d-bf4860de462e","Type":"ContainerDied","Data":"c7ee2845c13b1999be95f7f546ee0726fb0d5615bfd34c9baa546329798779ef"}
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.132301 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.132404 4910 scope.go:117] "RemoveContainer" containerID="b81bca0b8443cc9c98530fda7a1b4a2818271416709cdb0a79d2152658f12443"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.134627 4910 generic.go:334] "Generic (PLEG): container finished" podID="46c1b1b6-4117-47d1-ae0f-5a00e9160b96" containerID="ec15eda6f0fcc86b792859ef661468f7d73184669d20cf674f855bb6436b8152" exitCode=0
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.134726 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9" event={"ID":"46c1b1b6-4117-47d1-ae0f-5a00e9160b96","Type":"ContainerDied","Data":"ec15eda6f0fcc86b792859ef661468f7d73184669d20cf674f855bb6436b8152"}
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.134754 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9" event={"ID":"46c1b1b6-4117-47d1-ae0f-5a00e9160b96","Type":"ContainerDied","Data":"007dfcbaa4fa9fe9603b7876e9dbe4bf4f736c363506eafd8a11bcfb9adf2eee"}
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.134798 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.137498 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-swkg7" event={"ID":"4145136a-da1b-4e59-bd3a-b7565fc66443","Type":"ContainerStarted","Data":"37155ee0417b26d2a87624f4962ca4df6b37dd282f63d246943dc0d6da157b9d"}
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.147313 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gj6th"]
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.159471 4910 scope.go:117] "RemoveContainer" containerID="b81bca0b8443cc9c98530fda7a1b4a2818271416709cdb0a79d2152658f12443"
Jan 05 21:57:20 crc kubenswrapper[4910]: E0105 21:57:20.159966 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b81bca0b8443cc9c98530fda7a1b4a2818271416709cdb0a79d2152658f12443\": container with ID starting with b81bca0b8443cc9c98530fda7a1b4a2818271416709cdb0a79d2152658f12443 not found: ID does not exist" containerID="b81bca0b8443cc9c98530fda7a1b4a2818271416709cdb0a79d2152658f12443"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.160040 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b81bca0b8443cc9c98530fda7a1b4a2818271416709cdb0a79d2152658f12443"} err="failed to get container status \"b81bca0b8443cc9c98530fda7a1b4a2818271416709cdb0a79d2152658f12443\": rpc error: code = NotFound desc = could not find container \"b81bca0b8443cc9c98530fda7a1b4a2818271416709cdb0a79d2152658f12443\": container with ID starting with b81bca0b8443cc9c98530fda7a1b4a2818271416709cdb0a79d2152658f12443 not found: ID does not exist"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.160077 4910 scope.go:117] "RemoveContainer" containerID="ec15eda6f0fcc86b792859ef661468f7d73184669d20cf674f855bb6436b8152"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.173242 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-swkg7" podStartSLOduration=2.48551348 podStartE2EDuration="4.17321854s" podCreationTimestamp="2026-01-05 21:57:16 +0000 UTC" firstStartedPulling="2026-01-05 21:57:18.106355817 +0000 UTC m=+369.683853487" lastFinishedPulling="2026-01-05 21:57:19.794060877 +0000 UTC m=+371.371558547" observedRunningTime="2026-01-05 21:57:20.165930953 +0000 UTC m=+371.743428623" watchObservedRunningTime="2026-01-05 21:57:20.17321854 +0000 UTC m=+371.750716210"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.179914 4910 scope.go:117] "RemoveContainer" containerID="ec15eda6f0fcc86b792859ef661468f7d73184669d20cf674f855bb6436b8152"
Jan 05 21:57:20 crc kubenswrapper[4910]: E0105 21:57:20.180847 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec15eda6f0fcc86b792859ef661468f7d73184669d20cf674f855bb6436b8152\": container with ID starting with ec15eda6f0fcc86b792859ef661468f7d73184669d20cf674f855bb6436b8152 not found: ID does not exist" containerID="ec15eda6f0fcc86b792859ef661468f7d73184669d20cf674f855bb6436b8152"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.180906 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec15eda6f0fcc86b792859ef661468f7d73184669d20cf674f855bb6436b8152"} err="failed to get container status \"ec15eda6f0fcc86b792859ef661468f7d73184669d20cf674f855bb6436b8152\": rpc error: code = NotFound desc = could not find container \"ec15eda6f0fcc86b792859ef661468f7d73184669d20cf674f855bb6436b8152\": container with ID starting with ec15eda6f0fcc86b792859ef661468f7d73184669d20cf674f855bb6436b8152 not found: ID does not exist"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.194352 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-config\") pod \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") "
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.194414 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9cwcc\" (UniqueName: \"kubernetes.io/projected/2fc29a28-fa62-47b4-8a6d-bf4860de462e-kube-api-access-9cwcc\") pod \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") "
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.194451 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-client-ca\") pod \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") "
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.194470 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-client-ca\") pod \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") "
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.194497 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-config\") pod \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") "
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.194540 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-proxy-ca-bundles\") pod \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") "
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.194620 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-serving-cert\") pod \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") "
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.194648 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2fc29a28-fa62-47b4-8a6d-bf4860de462e-serving-cert\") pod \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\" (UID: \"2fc29a28-fa62-47b4-8a6d-bf4860de462e\") "
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.194664 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wlx8\" (UniqueName: \"kubernetes.io/projected/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-kube-api-access-5wlx8\") pod \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\" (UID: \"46c1b1b6-4117-47d1-ae0f-5a00e9160b96\") "
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.195870 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "2fc29a28-fa62-47b4-8a6d-bf4860de462e" (UID: "2fc29a28-fa62-47b4-8a6d-bf4860de462e"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.195921 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-config" (OuterVolumeSpecName: "config") pod "46c1b1b6-4117-47d1-ae0f-5a00e9160b96" (UID: "46c1b1b6-4117-47d1-ae0f-5a00e9160b96"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.195898 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-client-ca" (OuterVolumeSpecName: "client-ca") pod "2fc29a28-fa62-47b4-8a6d-bf4860de462e" (UID: "2fc29a28-fa62-47b4-8a6d-bf4860de462e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.196202 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-client-ca" (OuterVolumeSpecName: "client-ca") pod "46c1b1b6-4117-47d1-ae0f-5a00e9160b96" (UID: "46c1b1b6-4117-47d1-ae0f-5a00e9160b96"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.196637 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-config" (OuterVolumeSpecName: "config") pod "2fc29a28-fa62-47b4-8a6d-bf4860de462e" (UID: "2fc29a28-fa62-47b4-8a6d-bf4860de462e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.201703 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fc29a28-fa62-47b4-8a6d-bf4860de462e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2fc29a28-fa62-47b4-8a6d-bf4860de462e" (UID: "2fc29a28-fa62-47b4-8a6d-bf4860de462e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.201815 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-kube-api-access-5wlx8" (OuterVolumeSpecName: "kube-api-access-5wlx8") pod "46c1b1b6-4117-47d1-ae0f-5a00e9160b96" (UID: "46c1b1b6-4117-47d1-ae0f-5a00e9160b96"). InnerVolumeSpecName "kube-api-access-5wlx8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.201981 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "46c1b1b6-4117-47d1-ae0f-5a00e9160b96" (UID: "46c1b1b6-4117-47d1-ae0f-5a00e9160b96"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.202462 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fc29a28-fa62-47b4-8a6d-bf4860de462e-kube-api-access-9cwcc" (OuterVolumeSpecName: "kube-api-access-9cwcc") pod "2fc29a28-fa62-47b4-8a6d-bf4860de462e" (UID: "2fc29a28-fa62-47b4-8a6d-bf4860de462e"). InnerVolumeSpecName "kube-api-access-9cwcc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.238649 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-89z5z"]
Jan 05 21:57:20 crc kubenswrapper[4910]: E0105 21:57:20.238886 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fc29a28-fa62-47b4-8a6d-bf4860de462e" containerName="controller-manager"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.238936 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fc29a28-fa62-47b4-8a6d-bf4860de462e" containerName="controller-manager"
Jan 05 21:57:20 crc kubenswrapper[4910]: E0105 21:57:20.238955 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c1b1b6-4117-47d1-ae0f-5a00e9160b96" containerName="route-controller-manager"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.238962 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c1b1b6-4117-47d1-ae0f-5a00e9160b96" containerName="route-controller-manager"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.239071 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fc29a28-fa62-47b4-8a6d-bf4860de462e" containerName="controller-manager"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.239085 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c1b1b6-4117-47d1-ae0f-5a00e9160b96" containerName="route-controller-manager"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.241189 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-89z5z"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.245211 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.249399 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-89z5z"]
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.296754 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.296794 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2fc29a28-fa62-47b4-8a6d-bf4860de462e-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.296812 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wlx8\" (UniqueName: \"kubernetes.io/projected/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-kube-api-access-5wlx8\") on node \"crc\" DevicePath \"\""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.296825 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-config\") on node \"crc\" DevicePath \"\""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.296837 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cwcc\" (UniqueName: \"kubernetes.io/projected/2fc29a28-fa62-47b4-8a6d-bf4860de462e-kube-api-access-9cwcc\") on node \"crc\" DevicePath \"\""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.296849 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-client-ca\") on node \"crc\" DevicePath \"\""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.296860 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-client-ca\") on node \"crc\" DevicePath \"\""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.296873 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46c1b1b6-4117-47d1-ae0f-5a00e9160b96-config\") on node \"crc\" DevicePath \"\""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.296883 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2fc29a28-fa62-47b4-8a6d-bf4860de462e-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.398310 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pzdc\" (UniqueName: \"kubernetes.io/projected/9196e611-5468-4663-97c6-d50a40771bb4-kube-api-access-7pzdc\") pod \"community-operators-89z5z\" (UID: \"9196e611-5468-4663-97c6-d50a40771bb4\") " pod="openshift-marketplace/community-operators-89z5z"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.398815 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9196e611-5468-4663-97c6-d50a40771bb4-catalog-content\") pod \"community-operators-89z5z\" (UID: \"9196e611-5468-4663-97c6-d50a40771bb4\") " pod="openshift-marketplace/community-operators-89z5z"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.398849 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9196e611-5468-4663-97c6-d50a40771bb4-utilities\") pod \"community-operators-89z5z\" (UID: \"9196e611-5468-4663-97c6-d50a40771bb4\") " pod="openshift-marketplace/community-operators-89z5z"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.465646 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"]
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.470803 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-64f4f8899d-s2ccg"]
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.479358 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"]
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.485062 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bbdf4988-ghrc9"]
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.502144 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pzdc\" (UniqueName: \"kubernetes.io/projected/9196e611-5468-4663-97c6-d50a40771bb4-kube-api-access-7pzdc\") pod \"community-operators-89z5z\" (UID: \"9196e611-5468-4663-97c6-d50a40771bb4\") " pod="openshift-marketplace/community-operators-89z5z"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.502267 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9196e611-5468-4663-97c6-d50a40771bb4-catalog-content\") pod \"community-operators-89z5z\" (UID: \"9196e611-5468-4663-97c6-d50a40771bb4\") " pod="openshift-marketplace/community-operators-89z5z"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.502310 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9196e611-5468-4663-97c6-d50a40771bb4-utilities\") pod \"community-operators-89z5z\" (UID: \"9196e611-5468-4663-97c6-d50a40771bb4\") " pod="openshift-marketplace/community-operators-89z5z"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.503323 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9196e611-5468-4663-97c6-d50a40771bb4-catalog-content\") pod \"community-operators-89z5z\" (UID: \"9196e611-5468-4663-97c6-d50a40771bb4\") " pod="openshift-marketplace/community-operators-89z5z"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.505235 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9196e611-5468-4663-97c6-d50a40771bb4-utilities\") pod \"community-operators-89z5z\" (UID: \"9196e611-5468-4663-97c6-d50a40771bb4\") " pod="openshift-marketplace/community-operators-89z5z"
Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.524695 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pzdc\" (UniqueName: \"kubernetes.io/projected/9196e611-5468-4663-97c6-d50a40771bb4-kube-api-access-7pzdc\") pod
\"community-operators-89z5z\" (UID: \"9196e611-5468-4663-97c6-d50a40771bb4\") " pod="openshift-marketplace/community-operators-89z5z" Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.575306 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-89z5z" Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.742238 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fc29a28-fa62-47b4-8a6d-bf4860de462e" path="/var/lib/kubelet/pods/2fc29a28-fa62-47b4-8a6d-bf4860de462e/volumes" Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.743305 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46c1b1b6-4117-47d1-ae0f-5a00e9160b96" path="/var/lib/kubelet/pods/46c1b1b6-4117-47d1-ae0f-5a00e9160b96/volumes" Jan 05 21:57:20 crc kubenswrapper[4910]: I0105 21:57:20.800301 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-89z5z"] Jan 05 21:57:20 crc kubenswrapper[4910]: W0105 21:57:20.807415 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9196e611_5468_4663_97c6_d50a40771bb4.slice/crio-fa9c7ca1c058bf96a43691a735283278ec6a7ddeb034ca54683e0897af17e7cd WatchSource:0}: Error finding container fa9c7ca1c058bf96a43691a735283278ec6a7ddeb034ca54683e0897af17e7cd: Status 404 returned error can't find the container with id fa9c7ca1c058bf96a43691a735283278ec6a7ddeb034ca54683e0897af17e7cd Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.039179 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-675c76bd5c-m2wml"] Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.040626 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.040751 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj"] Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.041861 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.042704 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.043095 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.043318 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.043346 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.043641 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.043647 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.045258 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.045454 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.045475 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.045628 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.045628 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.045679 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.052251 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.053496 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-675c76bd5c-m2wml"] Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.057812 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj"] Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.147942 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-89z5z" event={"ID":"9196e611-5468-4663-97c6-d50a40771bb4","Type":"ContainerStarted","Data":"fa9c7ca1c058bf96a43691a735283278ec6a7ddeb034ca54683e0897af17e7cd"} Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.150140 4910 generic.go:334] "Generic (PLEG): container finished" podID="36f587d4-ab14-4c64-9fe6-fd09211dd62c" containerID="daa40587f52381dc6c8f454feb2622b8c7c1b759fe92a9be8084569577221770" exitCode=0 Jan 05 21:57:21 crc 
kubenswrapper[4910]: I0105 21:57:21.150216 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gj6th" event={"ID":"36f587d4-ab14-4c64-9fe6-fd09211dd62c","Type":"ContainerDied","Data":"daa40587f52381dc6c8f454feb2622b8c7c1b759fe92a9be8084569577221770"} Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.150239 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gj6th" event={"ID":"36f587d4-ab14-4c64-9fe6-fd09211dd62c","Type":"ContainerStarted","Data":"319dae3ab7afe985b12eb2839e72f3ac9d9bdfa726037f6965211383127d7f52"} Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.152725 4910 generic.go:334] "Generic (PLEG): container finished" podID="61bfc4cb-601d-4bbc-8820-59f6f8de1c63" containerID="0462492afc9fae44ee735a0c6e805789f3d725b587b2b51101ce7716ec5fc413" exitCode=0 Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.152827 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lc2hp" event={"ID":"61bfc4cb-601d-4bbc-8820-59f6f8de1c63","Type":"ContainerDied","Data":"0462492afc9fae44ee735a0c6e805789f3d725b587b2b51101ce7716ec5fc413"} Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.210590 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-config\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.210653 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87e96058-622f-4c18-b232-200b27bae804-serving-cert\") pod \"route-controller-manager-c9bdf6bbc-wnvjj\" (UID: \"87e96058-622f-4c18-b232-200b27bae804\") " pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.210687 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87e96058-622f-4c18-b232-200b27bae804-client-ca\") pod \"route-controller-manager-c9bdf6bbc-wnvjj\" (UID: \"87e96058-622f-4c18-b232-200b27bae804\") " pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.210704 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-client-ca\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.210719 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-serving-cert\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.210746 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-proxy-ca-bundles\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.210769 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87e96058-622f-4c18-b232-200b27bae804-config\") pod \"route-controller-manager-c9bdf6bbc-wnvjj\" (UID: \"87e96058-622f-4c18-b232-200b27bae804\") " pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.210786 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cvcn\" (UniqueName: \"kubernetes.io/projected/87e96058-622f-4c18-b232-200b27bae804-kube-api-access-4cvcn\") pod \"route-controller-manager-c9bdf6bbc-wnvjj\" (UID: \"87e96058-622f-4c18-b232-200b27bae804\") " pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.210833 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqknz\" (UniqueName: \"kubernetes.io/projected/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-kube-api-access-kqknz\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.311896 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-config\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.311956 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87e96058-622f-4c18-b232-200b27bae804-serving-cert\") pod \"route-controller-manager-c9bdf6bbc-wnvjj\" (UID: \"87e96058-622f-4c18-b232-200b27bae804\") " pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.311987 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87e96058-622f-4c18-b232-200b27bae804-client-ca\") pod \"route-controller-manager-c9bdf6bbc-wnvjj\" (UID: \"87e96058-622f-4c18-b232-200b27bae804\") " pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.312006 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-client-ca\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.312025 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-serving-cert\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.312052 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87e96058-622f-4c18-b232-200b27bae804-config\") pod \"route-controller-manager-c9bdf6bbc-wnvjj\" (UID: \"87e96058-622f-4c18-b232-200b27bae804\") " pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.312069 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-proxy-ca-bundles\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.312091 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cvcn\" (UniqueName: \"kubernetes.io/projected/87e96058-622f-4c18-b232-200b27bae804-kube-api-access-4cvcn\") pod \"route-controller-manager-c9bdf6bbc-wnvjj\" (UID: \"87e96058-622f-4c18-b232-200b27bae804\") " pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.312152 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqknz\" (UniqueName: \"kubernetes.io/projected/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-kube-api-access-kqknz\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.313142 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/87e96058-622f-4c18-b232-200b27bae804-client-ca\") pod \"route-controller-manager-c9bdf6bbc-wnvjj\" (UID: \"87e96058-622f-4c18-b232-200b27bae804\") " pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.313604 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-config\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.313612 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87e96058-622f-4c18-b232-200b27bae804-config\") pod \"route-controller-manager-c9bdf6bbc-wnvjj\" (UID: \"87e96058-622f-4c18-b232-200b27bae804\") " pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.314272 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-client-ca\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " 
pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.314977 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-proxy-ca-bundles\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.320144 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/87e96058-622f-4c18-b232-200b27bae804-serving-cert\") pod \"route-controller-manager-c9bdf6bbc-wnvjj\" (UID: \"87e96058-622f-4c18-b232-200b27bae804\") " pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.320315 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-serving-cert\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.331877 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqknz\" (UniqueName: \"kubernetes.io/projected/1f77dcb0-5c22-4bb7-b022-42807e5a3af2-kube-api-access-kqknz\") pod \"controller-manager-675c76bd5c-m2wml\" (UID: \"1f77dcb0-5c22-4bb7-b022-42807e5a3af2\") " pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.334303 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cvcn\" (UniqueName: \"kubernetes.io/projected/87e96058-622f-4c18-b232-200b27bae804-kube-api-access-4cvcn\") pod \"route-controller-manager-c9bdf6bbc-wnvjj\" (UID: \"87e96058-622f-4c18-b232-200b27bae804\") " pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.363082 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.372911 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.584089 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-675c76bd5c-m2wml"] Jan 05 21:57:21 crc kubenswrapper[4910]: W0105 21:57:21.591205 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f77dcb0_5c22_4bb7_b022_42807e5a3af2.slice/crio-e3b869131b8c066fec93ddf2d8ed38ed0f80acb5d07cd6b75f966848f82c26eb WatchSource:0}: Error finding container e3b869131b8c066fec93ddf2d8ed38ed0f80acb5d07cd6b75f966848f82c26eb: Status 404 returned error can't find the container with id e3b869131b8c066fec93ddf2d8ed38ed0f80acb5d07cd6b75f966848f82c26eb Jan 05 21:57:21 crc kubenswrapper[4910]: I0105 21:57:21.632380 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj"] Jan 05 21:57:21 crc kubenswrapper[4910]: W0105 21:57:21.648605 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod87e96058_622f_4c18_b232_200b27bae804.slice/crio-a3c1d9b0490507e4f7ccfb585f25fbab472c6c7d0e62989a1446b063cfde57ed WatchSource:0}: Error finding container a3c1d9b0490507e4f7ccfb585f25fbab472c6c7d0e62989a1446b063cfde57ed: Status 404 returned error can't find the container with id a3c1d9b0490507e4f7ccfb585f25fbab472c6c7d0e62989a1446b063cfde57ed Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.234720 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" event={"ID":"87e96058-622f-4c18-b232-200b27bae804","Type":"ContainerStarted","Data":"c851efb2faa6e8b5d915a1c66f24a74fbc36bedfee39c00562a1e857fb2dc3b8"} Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.235243 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" event={"ID":"87e96058-622f-4c18-b232-200b27bae804","Type":"ContainerStarted","Data":"a3c1d9b0490507e4f7ccfb585f25fbab472c6c7d0e62989a1446b063cfde57ed"} Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.235664 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.239758 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" event={"ID":"1f77dcb0-5c22-4bb7-b022-42807e5a3af2","Type":"ContainerStarted","Data":"75ea6a4bd6b94b68d07f49e72398b97dc7667d0300ed041aefa339e4501719de"} Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.239831 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" event={"ID":"1f77dcb0-5c22-4bb7-b022-42807e5a3af2","Type":"ContainerStarted","Data":"e3b869131b8c066fec93ddf2d8ed38ed0f80acb5d07cd6b75f966848f82c26eb"} Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.242048 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.246161 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.250101 4910 generic.go:334] "Generic (PLEG): container finished" podID="9196e611-5468-4663-97c6-d50a40771bb4" containerID="942e0ea6be37c7216dc2b77346817e19218a9ff92cc29399e4d76130f0e8ce22" exitCode=0 Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.250188 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-89z5z" event={"ID":"9196e611-5468-4663-97c6-d50a40771bb4","Type":"ContainerDied","Data":"942e0ea6be37c7216dc2b77346817e19218a9ff92cc29399e4d76130f0e8ce22"} Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.254971 4910 generic.go:334] "Generic (PLEG): container finished" podID="36f587d4-ab14-4c64-9fe6-fd09211dd62c" containerID="ba45654f5528dfff7499b31e847c0725eac18bb98d5c44bb5453af03af65a319" exitCode=0 Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.255015 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gj6th" event={"ID":"36f587d4-ab14-4c64-9fe6-fd09211dd62c","Type":"ContainerDied","Data":"ba45654f5528dfff7499b31e847c0725eac18bb98d5c44bb5453af03af65a319"} Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.263423 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" podStartSLOduration=3.263407445 podStartE2EDuration="3.263407445s" podCreationTimestamp="2026-01-05 21:57:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:57:22.261187278 +0000 UTC m=+373.838684948" watchObservedRunningTime="2026-01-05 21:57:22.263407445 +0000 UTC m=+373.840905115" Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.269381 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lc2hp" event={"ID":"61bfc4cb-601d-4bbc-8820-59f6f8de1c63","Type":"ContainerStarted","Data":"0496de862b756f582b573f485c711120c0e8d5c3d0e05036e0f5b3b556c00fd3"} Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.276301 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-c9bdf6bbc-wnvjj" Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.302417 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-675c76bd5c-m2wml" podStartSLOduration=3.30239356 podStartE2EDuration="3.30239356s" podCreationTimestamp="2026-01-05 21:57:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:57:22.299717231 +0000 UTC m=+373.877214891" watchObservedRunningTime="2026-01-05 21:57:22.30239356 +0000 UTC m=+373.879891230" Jan 05 21:57:22 crc kubenswrapper[4910]: I0105 21:57:22.384689 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lc2hp" podStartSLOduration=2.935589504 podStartE2EDuration="5.384605169s" podCreationTimestamp="2026-01-05 21:57:17 +0000 UTC" firstStartedPulling="2026-01-05 21:57:19.116625736 +0000 UTC m=+370.694123406" lastFinishedPulling="2026-01-05 21:57:21.565641411 +0000 UTC m=+373.143139071" observedRunningTime="2026-01-05 21:57:22.374704074 +0000 UTC m=+373.952201774" watchObservedRunningTime="2026-01-05 
21:57:22.384605169 +0000 UTC m=+373.962102839" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.276322 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gj6th" event={"ID":"36f587d4-ab14-4c64-9fe6-fd09211dd62c","Type":"ContainerStarted","Data":"92d905b0283af8b1870d421feec0e4b92ce4dbdf8837253820655bdfd1a941a6"} Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.296048 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gj6th" podStartSLOduration=2.751171843 podStartE2EDuration="4.296028761s" podCreationTimestamp="2026-01-05 21:57:19 +0000 UTC" firstStartedPulling="2026-01-05 21:57:21.152533393 +0000 UTC m=+372.730031073" lastFinishedPulling="2026-01-05 21:57:22.697390331 +0000 UTC m=+374.274887991" observedRunningTime="2026-01-05 21:57:23.294973664 +0000 UTC m=+374.872471334" watchObservedRunningTime="2026-01-05 21:57:23.296028761 +0000 UTC m=+374.873526431" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.444675 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tvprt"] Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.445932 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.484466 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tvprt"] Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.561855 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/609f8015-eea4-4f1e-9a41-9b6f0e950abf-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.561916 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-762vk\" (UniqueName: \"kubernetes.io/projected/609f8015-eea4-4f1e-9a41-9b6f0e950abf-kube-api-access-762vk\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.561994 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/609f8015-eea4-4f1e-9a41-9b6f0e950abf-registry-tls\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.562016 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/609f8015-eea4-4f1e-9a41-9b6f0e950abf-trusted-ca\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.562066 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/609f8015-eea4-4f1e-9a41-9b6f0e950abf-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.562097 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.562316 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/609f8015-eea4-4f1e-9a41-9b6f0e950abf-bound-sa-token\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.562483 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/609f8015-eea4-4f1e-9a41-9b6f0e950abf-registry-certificates\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.589396 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.664357 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/609f8015-eea4-4f1e-9a41-9b6f0e950abf-registry-tls\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.664410 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/609f8015-eea4-4f1e-9a41-9b6f0e950abf-trusted-ca\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.664442 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/609f8015-eea4-4f1e-9a41-9b6f0e950abf-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.664472 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/609f8015-eea4-4f1e-9a41-9b6f0e950abf-bound-sa-token\") pod \"image-registry-66df7c8f76-tvprt\" (UID: 
\"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.664507 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/609f8015-eea4-4f1e-9a41-9b6f0e950abf-registry-certificates\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.664530 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/609f8015-eea4-4f1e-9a41-9b6f0e950abf-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.664551 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-762vk\" (UniqueName: \"kubernetes.io/projected/609f8015-eea4-4f1e-9a41-9b6f0e950abf-kube-api-access-762vk\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.665742 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/609f8015-eea4-4f1e-9a41-9b6f0e950abf-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.665935 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/609f8015-eea4-4f1e-9a41-9b6f0e950abf-trusted-ca\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.666581 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/609f8015-eea4-4f1e-9a41-9b6f0e950abf-registry-certificates\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.671323 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/609f8015-eea4-4f1e-9a41-9b6f0e950abf-registry-tls\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.671688 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/609f8015-eea4-4f1e-9a41-9b6f0e950abf-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.681479 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/609f8015-eea4-4f1e-9a41-9b6f0e950abf-bound-sa-token\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.684600 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-762vk\" (UniqueName: \"kubernetes.io/projected/609f8015-eea4-4f1e-9a41-9b6f0e950abf-kube-api-access-762vk\") pod \"image-registry-66df7c8f76-tvprt\" (UID: \"609f8015-eea4-4f1e-9a41-9b6f0e950abf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:23 crc kubenswrapper[4910]: I0105 21:57:23.764805 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:24 crc kubenswrapper[4910]: I0105 21:57:24.219365 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tvprt"] Jan 05 21:57:24 crc kubenswrapper[4910]: I0105 21:57:24.285442 4910 generic.go:334] "Generic (PLEG): container finished" podID="9196e611-5468-4663-97c6-d50a40771bb4" containerID="d1c49b975b42e74d9abaeb1cb6fa46a8f4c73a225f96db6b6434464aae173aa6" exitCode=0 Jan 05 21:57:24 crc kubenswrapper[4910]: I0105 21:57:24.285529 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-89z5z" event={"ID":"9196e611-5468-4663-97c6-d50a40771bb4","Type":"ContainerDied","Data":"d1c49b975b42e74d9abaeb1cb6fa46a8f4c73a225f96db6b6434464aae173aa6"} Jan 05 21:57:24 crc kubenswrapper[4910]: I0105 21:57:24.291639 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" event={"ID":"609f8015-eea4-4f1e-9a41-9b6f0e950abf","Type":"ContainerStarted","Data":"4db98487de2824c00cb07c700bd00f028805b744077d1a6d30efea22f48fb0ee"} Jan 05 21:57:25 crc kubenswrapper[4910]: I0105 21:57:25.301180 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" event={"ID":"609f8015-eea4-4f1e-9a41-9b6f0e950abf","Type":"ContainerStarted","Data":"d6312628cf41dfe8a772911633f3303f94c7c64e9b8d68bdaca32c5d29f7d723"} Jan 05 21:57:25 crc kubenswrapper[4910]: I0105 21:57:25.301862 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-89z5z" event={"ID":"9196e611-5468-4663-97c6-d50a40771bb4","Type":"ContainerStarted","Data":"4e696a82e1735dbb407928ba93d2e0a8897eaf15729f9fe1387509957c32674c"} Jan 05 21:57:25 crc kubenswrapper[4910]: I0105 21:57:25.301906 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:25 crc kubenswrapper[4910]: I0105 21:57:25.330285 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" podStartSLOduration=2.330256883 podStartE2EDuration="2.330256883s" podCreationTimestamp="2026-01-05 21:57:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 21:57:25.328645652 +0000 UTC m=+376.906143322" watchObservedRunningTime="2026-01-05 21:57:25.330256883 +0000 UTC m=+376.907754553" Jan 05 21:57:25 crc kubenswrapper[4910]: I0105 21:57:25.350277 4910 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-marketplace/community-operators-89z5z" podStartSLOduration=2.91192312 podStartE2EDuration="5.350245139s" podCreationTimestamp="2026-01-05 21:57:20 +0000 UTC" firstStartedPulling="2026-01-05 21:57:22.251707673 +0000 UTC m=+373.829205343" lastFinishedPulling="2026-01-05 21:57:24.690029692 +0000 UTC m=+376.267527362" observedRunningTime="2026-01-05 21:57:25.34680788 +0000 UTC m=+376.924305540" watchObservedRunningTime="2026-01-05 21:57:25.350245139 +0000 UTC m=+376.927742809" Jan 05 21:57:26 crc kubenswrapper[4910]: I0105 21:57:26.958076 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-swkg7" Jan 05 21:57:26 crc kubenswrapper[4910]: I0105 21:57:26.958470 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-swkg7" Jan 05 21:57:27 crc kubenswrapper[4910]: I0105 21:57:27.007209 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-swkg7" Jan 05 21:57:27 crc kubenswrapper[4910]: I0105 21:57:27.358060 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-swkg7" Jan 05 21:57:27 crc kubenswrapper[4910]: I0105 21:57:27.956738 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lc2hp" Jan 05 21:57:27 crc kubenswrapper[4910]: I0105 21:57:27.956841 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lc2hp" Jan 05 21:57:27 crc kubenswrapper[4910]: I0105 21:57:27.996531 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lc2hp" Jan 05 21:57:28 crc kubenswrapper[4910]: I0105 21:57:28.353364 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lc2hp" Jan 05 21:57:29 crc kubenswrapper[4910]: I0105 21:57:29.598431 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gj6th" Jan 05 21:57:29 crc kubenswrapper[4910]: I0105 21:57:29.598516 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gj6th" Jan 05 21:57:29 crc kubenswrapper[4910]: I0105 21:57:29.634704 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gj6th" Jan 05 21:57:30 crc kubenswrapper[4910]: I0105 21:57:30.357345 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gj6th" Jan 05 21:57:30 crc kubenswrapper[4910]: I0105 21:57:30.575706 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-89z5z" Jan 05 21:57:30 crc kubenswrapper[4910]: I0105 21:57:30.575948 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-89z5z" Jan 05 21:57:30 crc kubenswrapper[4910]: I0105 21:57:30.610523 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-89z5z" Jan 05 21:57:31 crc kubenswrapper[4910]: I0105 21:57:31.373837 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-89z5z" Jan 05 21:57:40 crc 
kubenswrapper[4910]: I0105 21:57:40.952746 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 21:57:40 crc kubenswrapper[4910]: I0105 21:57:40.953427 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 21:57:43 crc kubenswrapper[4910]: I0105 21:57:43.770565 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-tvprt" Jan 05 21:57:43 crc kubenswrapper[4910]: I0105 21:57:43.834717 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-xg5fl"] Jan 05 21:58:08 crc kubenswrapper[4910]: I0105 21:58:08.882883 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" podUID="cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" containerName="registry" containerID="cri-o://ceb2190978cd52cf404ea8a77a5ef48bd7dd4a9ff589627d508e196cde33480f" gracePeriod=30 Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.305002 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.329084 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-ca-trust-extracted\") pod \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.329176 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-registry-certificates\") pod \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.329246 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ldcv\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-kube-api-access-4ldcv\") pod \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.329280 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-bound-sa-token\") pod \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.329404 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-trusted-ca\") pod \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.329766 
4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.329840 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-installation-pull-secrets\") pod \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.329901 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-registry-tls\") pod \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\" (UID: \"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8\") " Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.331317 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.333625 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.336403 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.336614 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.343030 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-kube-api-access-4ldcv" (OuterVolumeSpecName: "kube-api-access-4ldcv") pod "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8"). InnerVolumeSpecName "kube-api-access-4ldcv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.343512 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.349896 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.350695 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" (UID: "cb24f5dd-82b6-4a8e-8e86-b639a8435bf8"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.432211 4910 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.432250 4910 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.432265 4910 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.432280 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ldcv\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-kube-api-access-4ldcv\") on node \"crc\" DevicePath \"\"" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.432292 4910 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.432302 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.432311 4910 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.557630 4910 generic.go:334] "Generic (PLEG): container finished" podID="cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" 
containerID="ceb2190978cd52cf404ea8a77a5ef48bd7dd4a9ff589627d508e196cde33480f" exitCode=0 Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.557685 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" event={"ID":"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8","Type":"ContainerDied","Data":"ceb2190978cd52cf404ea8a77a5ef48bd7dd4a9ff589627d508e196cde33480f"} Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.557701 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.557732 4910 scope.go:117] "RemoveContainer" containerID="ceb2190978cd52cf404ea8a77a5ef48bd7dd4a9ff589627d508e196cde33480f" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.557716 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-xg5fl" event={"ID":"cb24f5dd-82b6-4a8e-8e86-b639a8435bf8","Type":"ContainerDied","Data":"81ce46da37b8920fc256e98069a7b4e197e0aee4b128ccaaa8b41c707d97279e"} Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.576957 4910 scope.go:117] "RemoveContainer" containerID="ceb2190978cd52cf404ea8a77a5ef48bd7dd4a9ff589627d508e196cde33480f" Jan 05 21:58:09 crc kubenswrapper[4910]: E0105 21:58:09.577727 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ceb2190978cd52cf404ea8a77a5ef48bd7dd4a9ff589627d508e196cde33480f\": container with ID starting with ceb2190978cd52cf404ea8a77a5ef48bd7dd4a9ff589627d508e196cde33480f not found: ID does not exist" containerID="ceb2190978cd52cf404ea8a77a5ef48bd7dd4a9ff589627d508e196cde33480f" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.577762 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceb2190978cd52cf404ea8a77a5ef48bd7dd4a9ff589627d508e196cde33480f"} err="failed to get container status \"ceb2190978cd52cf404ea8a77a5ef48bd7dd4a9ff589627d508e196cde33480f\": rpc error: code = NotFound desc = could not find container \"ceb2190978cd52cf404ea8a77a5ef48bd7dd4a9ff589627d508e196cde33480f\": container with ID starting with ceb2190978cd52cf404ea8a77a5ef48bd7dd4a9ff589627d508e196cde33480f not found: ID does not exist" Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.594269 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-xg5fl"] Jan 05 21:58:09 crc kubenswrapper[4910]: I0105 21:58:09.598898 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-xg5fl"] Jan 05 21:58:10 crc kubenswrapper[4910]: I0105 21:58:10.729170 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" path="/var/lib/kubelet/pods/cb24f5dd-82b6-4a8e-8e86-b639a8435bf8/volumes" Jan 05 21:58:10 crc kubenswrapper[4910]: I0105 21:58:10.952902 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 21:58:10 crc kubenswrapper[4910]: I0105 21:58:10.953028 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" 
podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 21:58:10 crc kubenswrapper[4910]: I0105 21:58:10.953083 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 21:58:10 crc kubenswrapper[4910]: I0105 21:58:10.954190 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"15612c8cccfa06b0cc74957c3ccd1b20e53a5417a6eefdbf59c2e8cdfb185ad1"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 21:58:10 crc kubenswrapper[4910]: I0105 21:58:10.954272 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://15612c8cccfa06b0cc74957c3ccd1b20e53a5417a6eefdbf59c2e8cdfb185ad1" gracePeriod=600 Jan 05 21:58:11 crc kubenswrapper[4910]: I0105 21:58:11.573273 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="15612c8cccfa06b0cc74957c3ccd1b20e53a5417a6eefdbf59c2e8cdfb185ad1" exitCode=0 Jan 05 21:58:11 crc kubenswrapper[4910]: I0105 21:58:11.573361 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"15612c8cccfa06b0cc74957c3ccd1b20e53a5417a6eefdbf59c2e8cdfb185ad1"} Jan 05 21:58:11 crc kubenswrapper[4910]: I0105 21:58:11.574276 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"24ad24a0bc4cca661f52af59417069858c5167c646d199a5c1c243653f4dbcbf"} Jan 05 21:58:11 crc kubenswrapper[4910]: I0105 21:58:11.574315 4910 scope.go:117] "RemoveContainer" containerID="c548d88a79922aab747d1b48bd38a44effbc2b7a587c5e9b1254fcbe0b0e2adb" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.168664 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq"] Jan 05 22:00:00 crc kubenswrapper[4910]: E0105 22:00:00.171143 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" containerName="registry" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.171289 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" containerName="registry" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.171571 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb24f5dd-82b6-4a8e-8e86-b639a8435bf8" containerName="registry" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.174773 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.176989 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.177487 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq"] Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.179503 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.188319 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54m8c\" (UniqueName: \"kubernetes.io/projected/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-kube-api-access-54m8c\") pod \"collect-profiles-29460840-89jfq\" (UID: \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.188376 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-config-volume\") pod \"collect-profiles-29460840-89jfq\" (UID: \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.188414 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-secret-volume\") pod \"collect-profiles-29460840-89jfq\" (UID: \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.290541 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-secret-volume\") pod \"collect-profiles-29460840-89jfq\" (UID: \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.290640 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54m8c\" (UniqueName: \"kubernetes.io/projected/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-kube-api-access-54m8c\") pod \"collect-profiles-29460840-89jfq\" (UID: \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.290678 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-config-volume\") pod \"collect-profiles-29460840-89jfq\" (UID: \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.293425 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-config-volume\") pod 
\"collect-profiles-29460840-89jfq\" (UID: \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.304325 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-secret-volume\") pod \"collect-profiles-29460840-89jfq\" (UID: \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.307022 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54m8c\" (UniqueName: \"kubernetes.io/projected/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-kube-api-access-54m8c\") pod \"collect-profiles-29460840-89jfq\" (UID: \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.491894 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" Jan 05 22:00:00 crc kubenswrapper[4910]: I0105 22:00:00.665274 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq"] Jan 05 22:00:01 crc kubenswrapper[4910]: I0105 22:00:01.253826 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" event={"ID":"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8","Type":"ContainerStarted","Data":"c639464b5decea442ee471624078a0785f03f3dc94d3ef7cf032ff156dda4a02"} Jan 05 22:00:02 crc kubenswrapper[4910]: I0105 22:00:02.264769 4910 generic.go:334] "Generic (PLEG): container finished" podID="fe30943a-f40d-49bd-b9a8-bb0b6e1701d8" containerID="11fde922fb9628da7da35571440344c7219779235ab7eabad9522924ee3f8703" exitCode=0 Jan 05 22:00:02 crc kubenswrapper[4910]: I0105 22:00:02.264848 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" event={"ID":"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8","Type":"ContainerDied","Data":"11fde922fb9628da7da35571440344c7219779235ab7eabad9522924ee3f8703"} Jan 05 22:00:03 crc kubenswrapper[4910]: I0105 22:00:03.558167 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" Jan 05 22:00:03 crc kubenswrapper[4910]: I0105 22:00:03.750630 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54m8c\" (UniqueName: \"kubernetes.io/projected/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-kube-api-access-54m8c\") pod \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\" (UID: \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\") " Jan 05 22:00:03 crc kubenswrapper[4910]: I0105 22:00:03.750773 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-config-volume\") pod \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\" (UID: \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\") " Jan 05 22:00:03 crc kubenswrapper[4910]: I0105 22:00:03.750816 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-secret-volume\") pod \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\" (UID: \"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8\") " Jan 05 22:00:03 crc kubenswrapper[4910]: I0105 22:00:03.752301 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-config-volume" (OuterVolumeSpecName: "config-volume") pod "fe30943a-f40d-49bd-b9a8-bb0b6e1701d8" (UID: "fe30943a-f40d-49bd-b9a8-bb0b6e1701d8"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:00:03 crc kubenswrapper[4910]: I0105 22:00:03.752937 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-config-volume\") on node \"crc\" DevicePath \"\"" Jan 05 22:00:03 crc kubenswrapper[4910]: I0105 22:00:03.758828 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-kube-api-access-54m8c" (OuterVolumeSpecName: "kube-api-access-54m8c") pod "fe30943a-f40d-49bd-b9a8-bb0b6e1701d8" (UID: "fe30943a-f40d-49bd-b9a8-bb0b6e1701d8"). InnerVolumeSpecName "kube-api-access-54m8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:00:03 crc kubenswrapper[4910]: I0105 22:00:03.759091 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "fe30943a-f40d-49bd-b9a8-bb0b6e1701d8" (UID: "fe30943a-f40d-49bd-b9a8-bb0b6e1701d8"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:00:03 crc kubenswrapper[4910]: I0105 22:00:03.854558 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 05 22:00:03 crc kubenswrapper[4910]: I0105 22:00:03.854635 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54m8c\" (UniqueName: \"kubernetes.io/projected/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8-kube-api-access-54m8c\") on node \"crc\" DevicePath \"\"" Jan 05 22:00:04 crc kubenswrapper[4910]: I0105 22:00:04.293642 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" event={"ID":"fe30943a-f40d-49bd-b9a8-bb0b6e1701d8","Type":"ContainerDied","Data":"c639464b5decea442ee471624078a0785f03f3dc94d3ef7cf032ff156dda4a02"} Jan 05 22:00:04 crc kubenswrapper[4910]: I0105 22:00:04.294021 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c639464b5decea442ee471624078a0785f03f3dc94d3ef7cf032ff156dda4a02" Jan 05 22:00:04 crc kubenswrapper[4910]: I0105 22:00:04.293703 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq" Jan 05 22:00:40 crc kubenswrapper[4910]: I0105 22:00:40.953248 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:00:40 crc kubenswrapper[4910]: I0105 22:00:40.953865 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:01:10 crc kubenswrapper[4910]: I0105 22:01:10.953003 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:01:10 crc kubenswrapper[4910]: I0105 22:01:10.953806 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:01:40 crc kubenswrapper[4910]: I0105 22:01:40.952429 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:01:40 crc kubenswrapper[4910]: I0105 22:01:40.953403 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:01:40 crc kubenswrapper[4910]: I0105 22:01:40.953472 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 22:01:40 crc kubenswrapper[4910]: I0105 22:01:40.954305 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"24ad24a0bc4cca661f52af59417069858c5167c646d199a5c1c243653f4dbcbf"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 22:01:40 crc kubenswrapper[4910]: I0105 22:01:40.954369 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://24ad24a0bc4cca661f52af59417069858c5167c646d199a5c1c243653f4dbcbf" gracePeriod=600 Jan 05 22:01:41 crc kubenswrapper[4910]: I0105 22:01:41.857861 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="24ad24a0bc4cca661f52af59417069858c5167c646d199a5c1c243653f4dbcbf" exitCode=0 Jan 05 22:01:41 crc kubenswrapper[4910]: I0105 22:01:41.857933 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"24ad24a0bc4cca661f52af59417069858c5167c646d199a5c1c243653f4dbcbf"} Jan 05 22:01:41 crc kubenswrapper[4910]: I0105 22:01:41.858398 4910 scope.go:117] "RemoveContainer" containerID="15612c8cccfa06b0cc74957c3ccd1b20e53a5417a6eefdbf59c2e8cdfb185ad1" Jan 05 22:01:42 crc kubenswrapper[4910]: I0105 22:01:42.868191 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"9e520e28b4c82f9c661ef0957d57afd6c58639ff887c3906d5a2d181968d14b2"} Jan 05 22:02:51 crc kubenswrapper[4910]: I0105 22:02:51.821434 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-4dq9c"] Jan 05 22:02:51 crc kubenswrapper[4910]: E0105 22:02:51.822387 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe30943a-f40d-49bd-b9a8-bb0b6e1701d8" containerName="collect-profiles" Jan 05 22:02:51 crc kubenswrapper[4910]: I0105 22:02:51.822407 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe30943a-f40d-49bd-b9a8-bb0b6e1701d8" containerName="collect-profiles" Jan 05 22:02:51 crc kubenswrapper[4910]: I0105 22:02:51.822514 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe30943a-f40d-49bd-b9a8-bb0b6e1701d8" containerName="collect-profiles" Jan 05 22:02:51 crc kubenswrapper[4910]: I0105 22:02:51.822946 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-4dq9c" Jan 05 22:02:51 crc kubenswrapper[4910]: I0105 22:02:51.827079 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 05 22:02:51 crc kubenswrapper[4910]: I0105 22:02:51.827149 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 05 22:02:51 crc kubenswrapper[4910]: I0105 22:02:51.827272 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 05 22:02:51 crc kubenswrapper[4910]: I0105 22:02:51.827272 4910 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-wtn8h" Jan 05 22:02:51 crc kubenswrapper[4910]: I0105 22:02:51.834401 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-4dq9c"] Jan 05 22:02:51 crc kubenswrapper[4910]: I0105 22:02:51.996685 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1b5c5e69-e0b4-4616-8284-99ca77b66846-node-mnt\") pod \"crc-storage-crc-4dq9c\" (UID: \"1b5c5e69-e0b4-4616-8284-99ca77b66846\") " pod="crc-storage/crc-storage-crc-4dq9c" Jan 05 22:02:51 crc kubenswrapper[4910]: I0105 22:02:51.996749 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1b5c5e69-e0b4-4616-8284-99ca77b66846-crc-storage\") pod \"crc-storage-crc-4dq9c\" (UID: \"1b5c5e69-e0b4-4616-8284-99ca77b66846\") " pod="crc-storage/crc-storage-crc-4dq9c" Jan 05 22:02:51 crc kubenswrapper[4910]: I0105 22:02:51.996775 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glnj2\" (UniqueName: \"kubernetes.io/projected/1b5c5e69-e0b4-4616-8284-99ca77b66846-kube-api-access-glnj2\") pod \"crc-storage-crc-4dq9c\" (UID: \"1b5c5e69-e0b4-4616-8284-99ca77b66846\") " pod="crc-storage/crc-storage-crc-4dq9c" Jan 05 22:02:52 crc kubenswrapper[4910]: I0105 22:02:52.097955 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glnj2\" (UniqueName: \"kubernetes.io/projected/1b5c5e69-e0b4-4616-8284-99ca77b66846-kube-api-access-glnj2\") pod \"crc-storage-crc-4dq9c\" (UID: \"1b5c5e69-e0b4-4616-8284-99ca77b66846\") " pod="crc-storage/crc-storage-crc-4dq9c" Jan 05 22:02:52 crc kubenswrapper[4910]: I0105 22:02:52.098094 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1b5c5e69-e0b4-4616-8284-99ca77b66846-node-mnt\") pod \"crc-storage-crc-4dq9c\" (UID: \"1b5c5e69-e0b4-4616-8284-99ca77b66846\") " pod="crc-storage/crc-storage-crc-4dq9c" Jan 05 22:02:52 crc kubenswrapper[4910]: I0105 22:02:52.098153 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1b5c5e69-e0b4-4616-8284-99ca77b66846-crc-storage\") pod \"crc-storage-crc-4dq9c\" (UID: \"1b5c5e69-e0b4-4616-8284-99ca77b66846\") " pod="crc-storage/crc-storage-crc-4dq9c" Jan 05 22:02:52 crc kubenswrapper[4910]: I0105 22:02:52.098338 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1b5c5e69-e0b4-4616-8284-99ca77b66846-node-mnt\") pod \"crc-storage-crc-4dq9c\" (UID: \"1b5c5e69-e0b4-4616-8284-99ca77b66846\") " 
pod="crc-storage/crc-storage-crc-4dq9c" Jan 05 22:02:52 crc kubenswrapper[4910]: I0105 22:02:52.099003 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1b5c5e69-e0b4-4616-8284-99ca77b66846-crc-storage\") pod \"crc-storage-crc-4dq9c\" (UID: \"1b5c5e69-e0b4-4616-8284-99ca77b66846\") " pod="crc-storage/crc-storage-crc-4dq9c" Jan 05 22:02:52 crc kubenswrapper[4910]: I0105 22:02:52.126219 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glnj2\" (UniqueName: \"kubernetes.io/projected/1b5c5e69-e0b4-4616-8284-99ca77b66846-kube-api-access-glnj2\") pod \"crc-storage-crc-4dq9c\" (UID: \"1b5c5e69-e0b4-4616-8284-99ca77b66846\") " pod="crc-storage/crc-storage-crc-4dq9c" Jan 05 22:02:52 crc kubenswrapper[4910]: I0105 22:02:52.151135 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-4dq9c" Jan 05 22:02:52 crc kubenswrapper[4910]: I0105 22:02:52.389478 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-4dq9c"] Jan 05 22:02:52 crc kubenswrapper[4910]: I0105 22:02:52.395728 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 22:02:53 crc kubenswrapper[4910]: I0105 22:02:53.300827 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-4dq9c" event={"ID":"1b5c5e69-e0b4-4616-8284-99ca77b66846","Type":"ContainerStarted","Data":"243d21813c2c69de416fce502fa334eee486fc16cb7ec15325161f72333469d9"} Jan 05 22:02:54 crc kubenswrapper[4910]: I0105 22:02:54.309301 4910 generic.go:334] "Generic (PLEG): container finished" podID="1b5c5e69-e0b4-4616-8284-99ca77b66846" containerID="c6d0b7b76eace8e3666c9a94ff4f4bf4ba21a302e0bd644e48b5bd1e7499514c" exitCode=0 Jan 05 22:02:54 crc kubenswrapper[4910]: I0105 22:02:54.309442 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-4dq9c" event={"ID":"1b5c5e69-e0b4-4616-8284-99ca77b66846","Type":"ContainerDied","Data":"c6d0b7b76eace8e3666c9a94ff4f4bf4ba21a302e0bd644e48b5bd1e7499514c"} Jan 05 22:02:55 crc kubenswrapper[4910]: I0105 22:02:55.551523 4910 util.go:48] "No ready sandbox for pod can be found. 
Jan 05 22:02:55 crc kubenswrapper[4910]: I0105 22:02:55.669643 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1b5c5e69-e0b4-4616-8284-99ca77b66846-crc-storage\") pod \"1b5c5e69-e0b4-4616-8284-99ca77b66846\" (UID: \"1b5c5e69-e0b4-4616-8284-99ca77b66846\") "
Jan 05 22:02:55 crc kubenswrapper[4910]: I0105 22:02:55.669767 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1b5c5e69-e0b4-4616-8284-99ca77b66846-node-mnt\") pod \"1b5c5e69-e0b4-4616-8284-99ca77b66846\" (UID: \"1b5c5e69-e0b4-4616-8284-99ca77b66846\") "
Jan 05 22:02:55 crc kubenswrapper[4910]: I0105 22:02:55.669880 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glnj2\" (UniqueName: \"kubernetes.io/projected/1b5c5e69-e0b4-4616-8284-99ca77b66846-kube-api-access-glnj2\") pod \"1b5c5e69-e0b4-4616-8284-99ca77b66846\" (UID: \"1b5c5e69-e0b4-4616-8284-99ca77b66846\") "
Jan 05 22:02:55 crc kubenswrapper[4910]: I0105 22:02:55.673211 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1b5c5e69-e0b4-4616-8284-99ca77b66846-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "1b5c5e69-e0b4-4616-8284-99ca77b66846" (UID: "1b5c5e69-e0b4-4616-8284-99ca77b66846"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 05 22:02:55 crc kubenswrapper[4910]: I0105 22:02:55.691161 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b5c5e69-e0b4-4616-8284-99ca77b66846-kube-api-access-glnj2" (OuterVolumeSpecName: "kube-api-access-glnj2") pod "1b5c5e69-e0b4-4616-8284-99ca77b66846" (UID: "1b5c5e69-e0b4-4616-8284-99ca77b66846"). InnerVolumeSpecName "kube-api-access-glnj2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:02:55 crc kubenswrapper[4910]: I0105 22:02:55.691915 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b5c5e69-e0b4-4616-8284-99ca77b66846-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "1b5c5e69-e0b4-4616-8284-99ca77b66846" (UID: "1b5c5e69-e0b4-4616-8284-99ca77b66846"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:02:55 crc kubenswrapper[4910]: I0105 22:02:55.771319 4910 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1b5c5e69-e0b4-4616-8284-99ca77b66846-node-mnt\") on node \"crc\" DevicePath \"\""
Jan 05 22:02:55 crc kubenswrapper[4910]: I0105 22:02:55.771597 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glnj2\" (UniqueName: \"kubernetes.io/projected/1b5c5e69-e0b4-4616-8284-99ca77b66846-kube-api-access-glnj2\") on node \"crc\" DevicePath \"\""
Jan 05 22:02:55 crc kubenswrapper[4910]: I0105 22:02:55.771607 4910 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1b5c5e69-e0b4-4616-8284-99ca77b66846-crc-storage\") on node \"crc\" DevicePath \"\""
Jan 05 22:02:56 crc kubenswrapper[4910]: I0105 22:02:56.324322 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-4dq9c" event={"ID":"1b5c5e69-e0b4-4616-8284-99ca77b66846","Type":"ContainerDied","Data":"243d21813c2c69de416fce502fa334eee486fc16cb7ec15325161f72333469d9"}
Jan 05 22:02:56 crc kubenswrapper[4910]: I0105 22:02:56.324384 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="243d21813c2c69de416fce502fa334eee486fc16cb7ec15325161f72333469d9"
Jan 05 22:02:56 crc kubenswrapper[4910]: I0105 22:02:56.324459 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-4dq9c"
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.653175 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fpk76"]
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.653864 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovn-controller" containerID="cri-o://4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3" gracePeriod=30
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.653993 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="sbdb" containerID="cri-o://7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8" gracePeriod=30
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.653994 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4" gracePeriod=30
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.654105 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="northd" containerID="cri-o://15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947" gracePeriod=30
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.654168 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovn-acl-logging" containerID="cri-o://94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062" gracePeriod=30
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.654153 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="kube-rbac-proxy-node" containerID="cri-o://f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620" gracePeriod=30
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.654269 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="nbdb" containerID="cri-o://5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d" gracePeriod=30
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.685794 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" containerID="cri-o://38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6" gracePeriod=30
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.930976 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/3.log"
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.933504 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovn-acl-logging/0.log"
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.934034 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovn-controller/0.log"
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.934469 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76"
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956098 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-cni-netd\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") "
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956199 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-openvswitch\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") "
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956218 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956248 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjpvg\" (UniqueName: \"kubernetes.io/projected/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-kube-api-access-gjpvg\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") "
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956349 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-systemd-units\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") "
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956393 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovnkube-script-lib\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") "
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956428 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovnkube-config\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") "
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956478 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-env-overrides\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") "
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956498 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-ovn\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") "
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956516 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-systemd\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") "
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956543 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovn-node-metrics-cert\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") "
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956578 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-cni-bin\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") "
Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956602 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-run-ovn-kubernetes\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") "
\"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956615 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-kubelet\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956633 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956650 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-etc-openvswitch\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956663 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-slash\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956686 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-node-log\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956718 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-var-lib-openvswitch\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956779 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-log-socket\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956731 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956819 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "host-run-netns". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956797 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-run-netns\") pod \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\" (UID: \"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b\") " Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.956844 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.957224 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.957275 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.957307 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.957341 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-node-log" (OuterVolumeSpecName: "node-log") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.957366 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.957366 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.957391 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-log-socket" (OuterVolumeSpecName: "log-socket") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.957441 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.957480 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.957512 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.957295 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-slash" (OuterVolumeSpecName: "host-slash") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.957661 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.957747 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958031 4910 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958057 4910 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958083 4910 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958095 4910 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958106 4910 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958147 4910 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958158 4910 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958173 4910 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958185 4910 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958211 4910 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-slash\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958222 4910 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958232 4910 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-node-log\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958241 4910 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-var-lib-openvswitch\") on 
node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958252 4910 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958262 4910 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-log-socket\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958293 4910 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.958304 4910 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.963992 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.964431 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-kube-api-access-gjpvg" (OuterVolumeSpecName: "kube-api-access-gjpvg") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "kube-api-access-gjpvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.978427 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" (UID: "f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.995517 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-42lt6"] Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.995888 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.995906 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.995916 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="kube-rbac-proxy-node" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.995923 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="kube-rbac-proxy-node" Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.995931 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovn-acl-logging" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.995937 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovn-acl-logging" Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.995948 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="northd" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.995953 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="northd" Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.995963 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovn-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.995968 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovn-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.995976 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b5c5e69-e0b4-4616-8284-99ca77b66846" containerName="storage" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.995982 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b5c5e69-e0b4-4616-8284-99ca77b66846" containerName="storage" Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.995991 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="kube-rbac-proxy-ovn-metrics" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.995997 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="kube-rbac-proxy-ovn-metrics" Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.996004 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="sbdb" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996009 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="sbdb" Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.996018 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" 
containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996023 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.996030 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996035 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.996043 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="kubecfg-setup" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996048 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="kubecfg-setup" Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.996056 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996061 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.996069 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="nbdb" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996074 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="nbdb" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996173 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996182 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="nbdb" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996190 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="sbdb" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996199 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b5c5e69-e0b4-4616-8284-99ca77b66846" containerName="storage" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996207 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovn-acl-logging" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996216 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996227 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="kube-rbac-proxy-node" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996236 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovn-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996242 4910 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="kube-rbac-proxy-ovn-metrics" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996250 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="northd" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996286 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996295 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: E0105 22:03:01.996421 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996429 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.996510 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerName="ovnkube-controller" Jan 05 22:03:01 crc kubenswrapper[4910]: I0105 22:03:01.998000 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.059484 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-run-netns\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.059560 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-run-systemd\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.059588 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-systemd-units\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.059612 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-kubelet\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.059810 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-run-ovn\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.059953 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9b65fca0-2437-42fc-be5e-0a7884a480f3-ovn-node-metrics-cert\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060018 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-etc-openvswitch\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060146 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-cni-bin\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060245 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-log-socket\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060281 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-slash\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060316 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-run-ovn-kubernetes\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060344 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-run-openvswitch\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060362 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9b65fca0-2437-42fc-be5e-0a7884a480f3-env-overrides\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060380 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9b65fca0-2437-42fc-be5e-0a7884a480f3-ovnkube-config\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060402 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9b65fca0-2437-42fc-be5e-0a7884a480f3-ovnkube-script-lib\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060421 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060543 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-var-lib-openvswitch\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060600 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-node-log\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060629 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-cni-netd\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060668 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4fgs\" (UniqueName: \"kubernetes.io/projected/9b65fca0-2437-42fc-be5e-0a7884a480f3-kube-api-access-k4fgs\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060834 4910 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060866 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjpvg\" (UniqueName: \"kubernetes.io/projected/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-kube-api-access-gjpvg\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.060884 4910 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.140770 4910 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx"] Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.142540 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.144338 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162020 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-run-systemd\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162097 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8c3f7294-a422-47b1-a323-82a8ac718bdc-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx\" (UID: \"8c3f7294-a422-47b1-a323-82a8ac718bdc\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162185 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-systemd-units\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162230 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-kubelet\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162255 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-run-ovn\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162276 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9b65fca0-2437-42fc-be5e-0a7884a480f3-ovn-node-metrics-cert\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162284 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-systemd-units\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162317 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-etc-openvswitch\") pod \"ovnkube-node-42lt6\" (UID: 
\"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162332 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-kubelet\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162336 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8c3f7294-a422-47b1-a323-82a8ac718bdc-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx\" (UID: \"8c3f7294-a422-47b1-a323-82a8ac718bdc\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162373 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-run-ovn\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162408 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-etc-openvswitch\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162475 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cxrr\" (UniqueName: \"kubernetes.io/projected/8c3f7294-a422-47b1-a323-82a8ac718bdc-kube-api-access-4cxrr\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx\" (UID: \"8c3f7294-a422-47b1-a323-82a8ac718bdc\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162528 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-cni-bin\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162569 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-log-socket\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162610 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-slash\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162654 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-run-ovn-kubernetes\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162700 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-run-openvswitch\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162735 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9b65fca0-2437-42fc-be5e-0a7884a480f3-env-overrides\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162767 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9b65fca0-2437-42fc-be5e-0a7884a480f3-ovnkube-config\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162791 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-log-socket\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162814 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9b65fca0-2437-42fc-be5e-0a7884a480f3-ovnkube-script-lib\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162863 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162908 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-var-lib-openvswitch\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162928 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-node-log\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162947 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: 
\"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-cni-netd\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.162980 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4fgs\" (UniqueName: \"kubernetes.io/projected/9b65fca0-2437-42fc-be5e-0a7884a480f3-kube-api-access-k4fgs\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.163020 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-run-netns\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.163106 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-run-openvswitch\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.163229 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-cni-bin\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.163172 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-run-netns\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.163189 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-var-lib-openvswitch\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.163193 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-node-log\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.163211 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-cni-netd\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.163334 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-var-lib-cni-networks-ovn-kubernetes\") pod 
\"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.163164 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-slash\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.163688 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-host-run-ovn-kubernetes\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.163955 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9b65fca0-2437-42fc-be5e-0a7884a480f3-env-overrides\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.164209 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9b65fca0-2437-42fc-be5e-0a7884a480f3-run-systemd\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.164555 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9b65fca0-2437-42fc-be5e-0a7884a480f3-ovnkube-script-lib\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.164583 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9b65fca0-2437-42fc-be5e-0a7884a480f3-ovnkube-config\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.165889 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9b65fca0-2437-42fc-be5e-0a7884a480f3-ovn-node-metrics-cert\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.179434 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4fgs\" (UniqueName: \"kubernetes.io/projected/9b65fca0-2437-42fc-be5e-0a7884a480f3-kube-api-access-k4fgs\") pod \"ovnkube-node-42lt6\" (UID: \"9b65fca0-2437-42fc-be5e-0a7884a480f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.264639 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8c3f7294-a422-47b1-a323-82a8ac718bdc-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx\" (UID: \"8c3f7294-a422-47b1-a323-82a8ac718bdc\") " 
pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.264730 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8c3f7294-a422-47b1-a323-82a8ac718bdc-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx\" (UID: \"8c3f7294-a422-47b1-a323-82a8ac718bdc\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.264766 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cxrr\" (UniqueName: \"kubernetes.io/projected/8c3f7294-a422-47b1-a323-82a8ac718bdc-kube-api-access-4cxrr\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx\" (UID: \"8c3f7294-a422-47b1-a323-82a8ac718bdc\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.265132 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8c3f7294-a422-47b1-a323-82a8ac718bdc-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx\" (UID: \"8c3f7294-a422-47b1-a323-82a8ac718bdc\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.265358 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8c3f7294-a422-47b1-a323-82a8ac718bdc-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx\" (UID: \"8c3f7294-a422-47b1-a323-82a8ac718bdc\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.282529 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cxrr\" (UniqueName: \"kubernetes.io/projected/8c3f7294-a422-47b1-a323-82a8ac718bdc-kube-api-access-4cxrr\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx\" (UID: \"8c3f7294-a422-47b1-a323-82a8ac718bdc\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.310912 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.361178 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-9zscm_07ebbe82-9e6e-47a5-91a7-4b515efc78db/kube-multus/2.log" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.361935 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-9zscm_07ebbe82-9e6e-47a5-91a7-4b515efc78db/kube-multus/1.log" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.361981 4910 generic.go:334] "Generic (PLEG): container finished" podID="07ebbe82-9e6e-47a5-91a7-4b515efc78db" containerID="1e8e55b2eb471b04f5366d8afb10f17f2bd5769bbfb6591d9aa2ac2beafc6b0c" exitCode=2 Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.362092 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-9zscm" event={"ID":"07ebbe82-9e6e-47a5-91a7-4b515efc78db","Type":"ContainerDied","Data":"1e8e55b2eb471b04f5366d8afb10f17f2bd5769bbfb6591d9aa2ac2beafc6b0c"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.362182 4910 scope.go:117] "RemoveContainer" containerID="8f84f3608a1f16a89bb0b2bd33ddfd1fd31073c40e4528dd2de478f96cf60a75" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.362839 4910 scope.go:117] "RemoveContainer" containerID="1e8e55b2eb471b04f5366d8afb10f17f2bd5769bbfb6591d9aa2ac2beafc6b0c" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.363037 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-9zscm_openshift-multus(07ebbe82-9e6e-47a5-91a7-4b515efc78db)\"" pod="openshift-multus/multus-9zscm" podUID="07ebbe82-9e6e-47a5-91a7-4b515efc78db" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.365884 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovnkube-controller/3.log" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.368480 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovn-acl-logging/0.log" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369043 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpk76_f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/ovn-controller/0.log" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369463 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerID="38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6" exitCode=0 Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369489 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerID="7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8" exitCode=0 Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369499 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerID="5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d" exitCode=0 Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369507 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerID="15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947" exitCode=0 Jan 05 
22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369513 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerID="3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4" exitCode=0 Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369521 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerID="f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620" exitCode=0 Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369528 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerID="94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062" exitCode=143 Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369534 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" containerID="4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3" exitCode=143 Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369548 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369573 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369588 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369605 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369624 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369640 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369655 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369671 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369686 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369694 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369701 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369708 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369716 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369723 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369730 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369737 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369744 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369753 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369766 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369775 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369783 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369789 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369799 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369806 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369813 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369821 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369828 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369834 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369844 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369856 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369865 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369872 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369880 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369887 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369894 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369901 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369908 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369916 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369924 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369934 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpk76" event={"ID":"f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b","Type":"ContainerDied","Data":"3d208d1bec35c3b6cf5bca037630b84c591b8a23c3201b51070386218411a43f"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369946 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369955 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369964 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369972 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369980 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369988 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.369996 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.370005 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.370014 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.370022 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.370705 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" event={"ID":"9b65fca0-2437-42fc-be5e-0a7884a480f3","Type":"ContainerStarted","Data":"807059aa270d3f1b5df50449e175afcf235b3dbab1391b1a4462e5789c8a30b4"} Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.392299 4910 scope.go:117] "RemoveContainer" containerID="38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.417217 4910 scope.go:117] "RemoveContainer" containerID="b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.432651 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fpk76"] Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.438973 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fpk76"] Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.455410 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.455867 4910 scope.go:117] "RemoveContainer" containerID="7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.476970 4910 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace_8c3f7294-a422-47b1-a323-82a8ac718bdc_0(b61840b12173c85cd85d5c51feffe8669933fd19f5054f2783785b3fb62de73e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.477064 4910 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace_8c3f7294-a422-47b1-a323-82a8ac718bdc_0(b61840b12173c85cd85d5c51feffe8669933fd19f5054f2783785b3fb62de73e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.477088 4910 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace_8c3f7294-a422-47b1-a323-82a8ac718bdc_0(b61840b12173c85cd85d5c51feffe8669933fd19f5054f2783785b3fb62de73e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.477157 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace(8c3f7294-a422-47b1-a323-82a8ac718bdc)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace(8c3f7294-a422-47b1-a323-82a8ac718bdc)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace_8c3f7294-a422-47b1-a323-82a8ac718bdc_0(b61840b12173c85cd85d5c51feffe8669933fd19f5054f2783785b3fb62de73e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" podUID="8c3f7294-a422-47b1-a323-82a8ac718bdc" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.523088 4910 scope.go:117] "RemoveContainer" containerID="5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.537419 4910 scope.go:117] "RemoveContainer" containerID="15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.553867 4910 scope.go:117] "RemoveContainer" containerID="3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.566706 4910 scope.go:117] "RemoveContainer" containerID="f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.579639 4910 scope.go:117] "RemoveContainer" containerID="94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.593383 4910 scope.go:117] "RemoveContainer" containerID="4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.607304 4910 scope.go:117] "RemoveContainer" containerID="4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.622401 4910 scope.go:117] "RemoveContainer" containerID="38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.628064 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6\": container with ID starting with 38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6 not found: ID does not exist" containerID="38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.628130 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6"} err="failed to get container status \"38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6\": rpc error: code = NotFound desc = could not find container \"38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6\": container with ID starting with 38d9833c4a5e415a92059ebf4d861d5b054b8235938e1980f88e206d819daee6 not found: ID does 
not exist" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.628170 4910 scope.go:117] "RemoveContainer" containerID="b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.628709 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed\": container with ID starting with b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed not found: ID does not exist" containerID="b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.628758 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed"} err="failed to get container status \"b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed\": rpc error: code = NotFound desc = could not find container \"b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed\": container with ID starting with b1fbe4f06dcb8b7037b5c0a391c99cbb23e18650b2ad7bae72873b23e3179fed not found: ID does not exist" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.628793 4910 scope.go:117] "RemoveContainer" containerID="7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.629106 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\": container with ID starting with 7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8 not found: ID does not exist" containerID="7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.629155 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8"} err="failed to get container status \"7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\": rpc error: code = NotFound desc = could not find container \"7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8\": container with ID starting with 7bcd5b13e825eda70cfae603bcc82f3f85a8266653bf56ba228d1954d91645a8 not found: ID does not exist" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.629170 4910 scope.go:117] "RemoveContainer" containerID="5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.629445 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\": container with ID starting with 5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d not found: ID does not exist" containerID="5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.629471 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d"} err="failed to get container status \"5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\": rpc error: code = NotFound desc = could not find container 
\"5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d\": container with ID starting with 5b921dcc22af91554be9aba58727bfb7b7a38f019e26711823bbccd1609c4e6d not found: ID does not exist" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.629487 4910 scope.go:117] "RemoveContainer" containerID="15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.629729 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\": container with ID starting with 15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947 not found: ID does not exist" containerID="15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.629759 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947"} err="failed to get container status \"15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\": rpc error: code = NotFound desc = could not find container \"15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947\": container with ID starting with 15de47eed0167fa74b09a6f11bff4893ef9350f14fdc9a1810ff932673370947 not found: ID does not exist" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.629789 4910 scope.go:117] "RemoveContainer" containerID="3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.630061 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\": container with ID starting with 3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4 not found: ID does not exist" containerID="3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.630083 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4"} err="failed to get container status \"3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\": rpc error: code = NotFound desc = could not find container \"3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4\": container with ID starting with 3238062f9c45e9e571a0dc813a259771dd595683bbb18e30e51cf38c594438c4 not found: ID does not exist" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.630098 4910 scope.go:117] "RemoveContainer" containerID="f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.630355 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\": container with ID starting with f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620 not found: ID does not exist" containerID="f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.630386 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620"} 
err="failed to get container status \"f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\": rpc error: code = NotFound desc = could not find container \"f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620\": container with ID starting with f9311df55c46f267fe8905e71dbbcb6b5d6e00e21d0124fa7a3acde459153620 not found: ID does not exist" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.630406 4910 scope.go:117] "RemoveContainer" containerID="94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.630650 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\": container with ID starting with 94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062 not found: ID does not exist" containerID="94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.630690 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062"} err="failed to get container status \"94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\": rpc error: code = NotFound desc = could not find container \"94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062\": container with ID starting with 94580d2b1f44cd64aa18377adeb1283e6d08a108c8542d6328151f287dcc9062 not found: ID does not exist" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.630705 4910 scope.go:117] "RemoveContainer" containerID="4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.631954 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\": container with ID starting with 4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3 not found: ID does not exist" containerID="4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.631982 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3"} err="failed to get container status \"4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\": rpc error: code = NotFound desc = could not find container \"4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3\": container with ID starting with 4ecd9414e08af66e59c55d7b1ffff2839e22ac38b57088d01f3e82068c4a5bf3 not found: ID does not exist" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.632022 4910 scope.go:117] "RemoveContainer" containerID="4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95" Jan 05 22:03:02 crc kubenswrapper[4910]: E0105 22:03:02.633406 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\": container with ID starting with 4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95 not found: ID does not exist" containerID="4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95" Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.633446 4910 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95"} err="failed to get container status \"4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\": rpc error: code = NotFound desc = could not find container \"4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95\": container with ID starting with 4136c0469148057081944dbaf8dfeef611c909c35f8088543d75c26f43a91a95 not found: ID does not exist"
Jan 05 22:03:02 crc kubenswrapper[4910]: I0105 22:03:02.729536 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b" path="/var/lib/kubelet/pods/f85c76d1-cfbe-4c6b-86c1-6d51bd45b42b/volumes"
Jan 05 22:03:03 crc kubenswrapper[4910]: I0105 22:03:03.380993 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-9zscm_07ebbe82-9e6e-47a5-91a7-4b515efc78db/kube-multus/2.log"
Jan 05 22:03:03 crc kubenswrapper[4910]: I0105 22:03:03.384945 4910 generic.go:334] "Generic (PLEG): container finished" podID="9b65fca0-2437-42fc-be5e-0a7884a480f3" containerID="35195ab8748dbfbcaab8c1df3a17019a30dec305f0ec2ec5232f17f5dd775efd" exitCode=0
Jan 05 22:03:03 crc kubenswrapper[4910]: I0105 22:03:03.384989 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6"
event={"ID":"9b65fca0-2437-42fc-be5e-0a7884a480f3","Type":"ContainerDied","Data":"35195ab8748dbfbcaab8c1df3a17019a30dec305f0ec2ec5232f17f5dd775efd"} Jan 05 22:03:04 crc kubenswrapper[4910]: I0105 22:03:04.393894 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" event={"ID":"9b65fca0-2437-42fc-be5e-0a7884a480f3","Type":"ContainerStarted","Data":"2a86e447cb3d5b837a682f45ff95240b709b018389bb3ed2dd697f04182d9177"} Jan 05 22:03:04 crc kubenswrapper[4910]: I0105 22:03:04.394310 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" event={"ID":"9b65fca0-2437-42fc-be5e-0a7884a480f3","Type":"ContainerStarted","Data":"49dc68a8ee765138c179dff09fe4117a267f2387536ff5cf7bfb4d35e25a0da2"} Jan 05 22:03:04 crc kubenswrapper[4910]: I0105 22:03:04.394331 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" event={"ID":"9b65fca0-2437-42fc-be5e-0a7884a480f3","Type":"ContainerStarted","Data":"0abdbbb7426c525932efba534fd558bc91b141f59c6d50a8c1ba9823060d23b7"} Jan 05 22:03:04 crc kubenswrapper[4910]: I0105 22:03:04.394345 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" event={"ID":"9b65fca0-2437-42fc-be5e-0a7884a480f3","Type":"ContainerStarted","Data":"4b0f29852b9e9af7d83dbede07e956990f9c929af5a07782143d4c07246ac589"} Jan 05 22:03:04 crc kubenswrapper[4910]: I0105 22:03:04.394356 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" event={"ID":"9b65fca0-2437-42fc-be5e-0a7884a480f3","Type":"ContainerStarted","Data":"b3adfe6983128f25dbe61f9f10bc75f70594c56874acf36f306a2f377617e62c"} Jan 05 22:03:04 crc kubenswrapper[4910]: I0105 22:03:04.394373 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" event={"ID":"9b65fca0-2437-42fc-be5e-0a7884a480f3","Type":"ContainerStarted","Data":"e52c1c00e8132556541938d5688bae5ce0f877108dcd107cdacfd02fad87edd8"} Jan 05 22:03:06 crc kubenswrapper[4910]: I0105 22:03:06.414570 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" event={"ID":"9b65fca0-2437-42fc-be5e-0a7884a480f3","Type":"ContainerStarted","Data":"4d51f21ab05e4fe7c01de191fc50b75ed1c4d3c31409a585578c388d75cc6a36"} Jan 05 22:03:09 crc kubenswrapper[4910]: I0105 22:03:09.425157 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx"] Jan 05 22:03:09 crc kubenswrapper[4910]: I0105 22:03:09.425762 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:09 crc kubenswrapper[4910]: I0105 22:03:09.426226 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:09 crc kubenswrapper[4910]: I0105 22:03:09.440067 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" event={"ID":"9b65fca0-2437-42fc-be5e-0a7884a480f3","Type":"ContainerStarted","Data":"c4c90eaad98927b48dbcedbfb0b0c50c1a1a17d0b0a33f5fbff45805ed8f5ce8"} Jan 05 22:03:09 crc kubenswrapper[4910]: I0105 22:03:09.441216 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:09 crc kubenswrapper[4910]: I0105 22:03:09.441256 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:09 crc kubenswrapper[4910]: I0105 22:03:09.441322 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:09 crc kubenswrapper[4910]: E0105 22:03:09.458491 4910 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace_8c3f7294-a422-47b1-a323-82a8ac718bdc_0(769342a384268f16ed14dfa1be54f8769cae8b2c086badf4ccea01085d70439b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 05 22:03:09 crc kubenswrapper[4910]: E0105 22:03:09.458636 4910 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace_8c3f7294-a422-47b1-a323-82a8ac718bdc_0(769342a384268f16ed14dfa1be54f8769cae8b2c086badf4ccea01085d70439b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:09 crc kubenswrapper[4910]: E0105 22:03:09.458707 4910 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace_8c3f7294-a422-47b1-a323-82a8ac718bdc_0(769342a384268f16ed14dfa1be54f8769cae8b2c086badf4ccea01085d70439b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:09 crc kubenswrapper[4910]: E0105 22:03:09.458852 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace(8c3f7294-a422-47b1-a323-82a8ac718bdc)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace(8c3f7294-a422-47b1-a323-82a8ac718bdc)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace_8c3f7294-a422-47b1-a323-82a8ac718bdc_0(769342a384268f16ed14dfa1be54f8769cae8b2c086badf4ccea01085d70439b): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" podUID="8c3f7294-a422-47b1-a323-82a8ac718bdc" Jan 05 22:03:09 crc kubenswrapper[4910]: I0105 22:03:09.468168 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" podStartSLOduration=8.468149473 podStartE2EDuration="8.468149473s" podCreationTimestamp="2026-01-05 22:03:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:03:09.467221302 +0000 UTC m=+721.044718982" watchObservedRunningTime="2026-01-05 22:03:09.468149473 +0000 UTC m=+721.045647143" Jan 05 22:03:09 crc kubenswrapper[4910]: I0105 22:03:09.478483 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:09 crc kubenswrapper[4910]: I0105 22:03:09.480072 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:16 crc kubenswrapper[4910]: I0105 22:03:16.722558 4910 scope.go:117] "RemoveContainer" containerID="1e8e55b2eb471b04f5366d8afb10f17f2bd5769bbfb6591d9aa2ac2beafc6b0c" Jan 05 22:03:16 crc kubenswrapper[4910]: E0105 22:03:16.724157 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-9zscm_openshift-multus(07ebbe82-9e6e-47a5-91a7-4b515efc78db)\"" pod="openshift-multus/multus-9zscm" podUID="07ebbe82-9e6e-47a5-91a7-4b515efc78db" Jan 05 22:03:20 crc kubenswrapper[4910]: I0105 22:03:20.720901 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:20 crc kubenswrapper[4910]: I0105 22:03:20.722003 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:20 crc kubenswrapper[4910]: E0105 22:03:20.761812 4910 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace_8c3f7294-a422-47b1-a323-82a8ac718bdc_0(4cf650546598e022ff8b02907b27fffd44d80b628338398cfcb0da833f0c06a6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 05 22:03:20 crc kubenswrapper[4910]: E0105 22:03:20.761913 4910 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace_8c3f7294-a422-47b1-a323-82a8ac718bdc_0(4cf650546598e022ff8b02907b27fffd44d80b628338398cfcb0da833f0c06a6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:20 crc kubenswrapper[4910]: E0105 22:03:20.761957 4910 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace_8c3f7294-a422-47b1-a323-82a8ac718bdc_0(4cf650546598e022ff8b02907b27fffd44d80b628338398cfcb0da833f0c06a6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:20 crc kubenswrapper[4910]: E0105 22:03:20.762274 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace(8c3f7294-a422-47b1-a323-82a8ac718bdc)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace(8c3f7294-a422-47b1-a323-82a8ac718bdc)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_openshift-marketplace_8c3f7294-a422-47b1-a323-82a8ac718bdc_0(4cf650546598e022ff8b02907b27fffd44d80b628338398cfcb0da833f0c06a6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" podUID="8c3f7294-a422-47b1-a323-82a8ac718bdc" Jan 05 22:03:28 crc kubenswrapper[4910]: I0105 22:03:28.725393 4910 scope.go:117] "RemoveContainer" containerID="1e8e55b2eb471b04f5366d8afb10f17f2bd5769bbfb6591d9aa2ac2beafc6b0c" Jan 05 22:03:29 crc kubenswrapper[4910]: I0105 22:03:29.575627 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-9zscm_07ebbe82-9e6e-47a5-91a7-4b515efc78db/kube-multus/2.log" Jan 05 22:03:29 crc kubenswrapper[4910]: I0105 22:03:29.576332 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-9zscm" event={"ID":"07ebbe82-9e6e-47a5-91a7-4b515efc78db","Type":"ContainerStarted","Data":"3d3dbef9af3d880ee8df8e5268fed000ec54086cbe778a2f2446a9bf18932392"} Jan 05 22:03:31 crc kubenswrapper[4910]: I0105 22:03:31.720796 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:31 crc kubenswrapper[4910]: I0105 22:03:31.721299 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:32 crc kubenswrapper[4910]: I0105 22:03:32.139613 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx"] Jan 05 22:03:32 crc kubenswrapper[4910]: W0105 22:03:32.142574 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8c3f7294_a422_47b1_a323_82a8ac718bdc.slice/crio-690f8e81e116021eeb8ece9f2cfbbc196077256ab368f0c88feb9111e3d16d46 WatchSource:0}: Error finding container 690f8e81e116021eeb8ece9f2cfbbc196077256ab368f0c88feb9111e3d16d46: Status 404 returned error can't find the container with id 690f8e81e116021eeb8ece9f2cfbbc196077256ab368f0c88feb9111e3d16d46 Jan 05 22:03:32 crc kubenswrapper[4910]: I0105 22:03:32.339308 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-42lt6" Jan 05 22:03:32 crc kubenswrapper[4910]: I0105 22:03:32.598684 4910 generic.go:334] "Generic (PLEG): container finished" podID="8c3f7294-a422-47b1-a323-82a8ac718bdc" containerID="f4fd466ab9f30d94c9380d0e4f1c98459ceee1c0f1150bf944e28ff8c41c49e9" exitCode=0 Jan 05 22:03:32 crc kubenswrapper[4910]: I0105 22:03:32.598752 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" event={"ID":"8c3f7294-a422-47b1-a323-82a8ac718bdc","Type":"ContainerDied","Data":"f4fd466ab9f30d94c9380d0e4f1c98459ceee1c0f1150bf944e28ff8c41c49e9"} Jan 05 22:03:32 crc kubenswrapper[4910]: I0105 22:03:32.598791 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" event={"ID":"8c3f7294-a422-47b1-a323-82a8ac718bdc","Type":"ContainerStarted","Data":"690f8e81e116021eeb8ece9f2cfbbc196077256ab368f0c88feb9111e3d16d46"} Jan 05 22:03:34 crc kubenswrapper[4910]: I0105 22:03:34.617704 4910 generic.go:334] "Generic (PLEG): container finished" podID="8c3f7294-a422-47b1-a323-82a8ac718bdc" containerID="008559e91df8f7674fa284b3bf4a7173d009e1b0c3ba11886ff248f27f4aa3d8" exitCode=0 Jan 05 22:03:34 crc kubenswrapper[4910]: I0105 22:03:34.617838 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" event={"ID":"8c3f7294-a422-47b1-a323-82a8ac718bdc","Type":"ContainerDied","Data":"008559e91df8f7674fa284b3bf4a7173d009e1b0c3ba11886ff248f27f4aa3d8"} Jan 05 22:03:35 crc kubenswrapper[4910]: I0105 22:03:35.631906 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" event={"ID":"8c3f7294-a422-47b1-a323-82a8ac718bdc","Type":"ContainerStarted","Data":"b99a4e693c2e1afbd26f59137953c0acf6f1b0aff304697b64ef808edf5f9866"} Jan 05 22:03:35 crc kubenswrapper[4910]: I0105 22:03:35.649983 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" podStartSLOduration=32.224714156 podStartE2EDuration="33.649960448s" podCreationTimestamp="2026-01-05 22:03:02 +0000 UTC" firstStartedPulling="2026-01-05 22:03:32.601505087 +0000 UTC m=+744.179002757" lastFinishedPulling="2026-01-05 22:03:34.026751339 +0000 UTC m=+745.604249049" observedRunningTime="2026-01-05 
22:03:35.648137506 +0000 UTC m=+747.225635176" watchObservedRunningTime="2026-01-05 22:03:35.649960448 +0000 UTC m=+747.227458118" Jan 05 22:03:36 crc kubenswrapper[4910]: I0105 22:03:36.641432 4910 generic.go:334] "Generic (PLEG): container finished" podID="8c3f7294-a422-47b1-a323-82a8ac718bdc" containerID="b99a4e693c2e1afbd26f59137953c0acf6f1b0aff304697b64ef808edf5f9866" exitCode=0 Jan 05 22:03:36 crc kubenswrapper[4910]: I0105 22:03:36.641552 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" event={"ID":"8c3f7294-a422-47b1-a323-82a8ac718bdc","Type":"ContainerDied","Data":"b99a4e693c2e1afbd26f59137953c0acf6f1b0aff304697b64ef808edf5f9866"} Jan 05 22:03:38 crc kubenswrapper[4910]: I0105 22:03:38.004300 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:38 crc kubenswrapper[4910]: I0105 22:03:38.130664 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8c3f7294-a422-47b1-a323-82a8ac718bdc-bundle\") pod \"8c3f7294-a422-47b1-a323-82a8ac718bdc\" (UID: \"8c3f7294-a422-47b1-a323-82a8ac718bdc\") " Jan 05 22:03:38 crc kubenswrapper[4910]: I0105 22:03:38.130741 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cxrr\" (UniqueName: \"kubernetes.io/projected/8c3f7294-a422-47b1-a323-82a8ac718bdc-kube-api-access-4cxrr\") pod \"8c3f7294-a422-47b1-a323-82a8ac718bdc\" (UID: \"8c3f7294-a422-47b1-a323-82a8ac718bdc\") " Jan 05 22:03:38 crc kubenswrapper[4910]: I0105 22:03:38.130780 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8c3f7294-a422-47b1-a323-82a8ac718bdc-util\") pod \"8c3f7294-a422-47b1-a323-82a8ac718bdc\" (UID: \"8c3f7294-a422-47b1-a323-82a8ac718bdc\") " Jan 05 22:03:38 crc kubenswrapper[4910]: I0105 22:03:38.131788 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c3f7294-a422-47b1-a323-82a8ac718bdc-bundle" (OuterVolumeSpecName: "bundle") pod "8c3f7294-a422-47b1-a323-82a8ac718bdc" (UID: "8c3f7294-a422-47b1-a323-82a8ac718bdc"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:03:38 crc kubenswrapper[4910]: I0105 22:03:38.137665 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c3f7294-a422-47b1-a323-82a8ac718bdc-kube-api-access-4cxrr" (OuterVolumeSpecName: "kube-api-access-4cxrr") pod "8c3f7294-a422-47b1-a323-82a8ac718bdc" (UID: "8c3f7294-a422-47b1-a323-82a8ac718bdc"). InnerVolumeSpecName "kube-api-access-4cxrr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:03:38 crc kubenswrapper[4910]: I0105 22:03:38.140850 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c3f7294-a422-47b1-a323-82a8ac718bdc-util" (OuterVolumeSpecName: "util") pod "8c3f7294-a422-47b1-a323-82a8ac718bdc" (UID: "8c3f7294-a422-47b1-a323-82a8ac718bdc"). InnerVolumeSpecName "util". 
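The "Observed pod startup duration" entry above carries its own arithmetic: podStartE2EDuration is observedRunningTime minus podCreationTimestamp (22:03:35.649960448 − 22:03:02 = 33.649960448s), and podStartSLOduration matches that figure minus the image-pull window measured on the monotonic clock (the m=+… offsets). A quick check of the numbers as logged:

```go
// sloduration.go - verifies the arithmetic in the "Observed pod startup
// duration" entry above. The reading of the fields is inferred from the
// values in this log, not from kubelet documentation.
package main

import "fmt"

func main() {
	// monotonic offsets (seconds) copied from the log entry above
	firstStartedPulling := 744.179002757
	lastFinishedPulling := 745.604249049
	podStartE2E := 33.649960448 // observedRunningTime - podCreationTimestamp

	pull := lastFinishedPulling - firstStartedPulling
	fmt.Printf("image-pull window: %.9fs\n", pull)        // 1.425246292s
	fmt.Printf("SLO duration:      %.9fs\n", podStartE2E-pull) // 32.224714156s, matching the log
}
```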
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:03:38 crc kubenswrapper[4910]: I0105 22:03:38.232480 4910 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8c3f7294-a422-47b1-a323-82a8ac718bdc-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:38 crc kubenswrapper[4910]: I0105 22:03:38.232525 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cxrr\" (UniqueName: \"kubernetes.io/projected/8c3f7294-a422-47b1-a323-82a8ac718bdc-kube-api-access-4cxrr\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:38 crc kubenswrapper[4910]: I0105 22:03:38.232535 4910 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8c3f7294-a422-47b1-a323-82a8ac718bdc-util\") on node \"crc\" DevicePath \"\"" Jan 05 22:03:38 crc kubenswrapper[4910]: I0105 22:03:38.660774 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" event={"ID":"8c3f7294-a422-47b1-a323-82a8ac718bdc","Type":"ContainerDied","Data":"690f8e81e116021eeb8ece9f2cfbbc196077256ab368f0c88feb9111e3d16d46"} Jan 05 22:03:38 crc kubenswrapper[4910]: I0105 22:03:38.660836 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="690f8e81e116021eeb8ece9f2cfbbc196077256ab368f0c88feb9111e3d16d46" Jan 05 22:03:38 crc kubenswrapper[4910]: I0105 22:03:38.660911 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.014452 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-47b86"] Jan 05 22:03:44 crc kubenswrapper[4910]: E0105 22:03:44.015258 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c3f7294-a422-47b1-a323-82a8ac718bdc" containerName="util" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.015271 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c3f7294-a422-47b1-a323-82a8ac718bdc" containerName="util" Jan 05 22:03:44 crc kubenswrapper[4910]: E0105 22:03:44.015283 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c3f7294-a422-47b1-a323-82a8ac718bdc" containerName="pull" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.015289 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c3f7294-a422-47b1-a323-82a8ac718bdc" containerName="pull" Jan 05 22:03:44 crc kubenswrapper[4910]: E0105 22:03:44.015306 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c3f7294-a422-47b1-a323-82a8ac718bdc" containerName="extract" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.015311 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c3f7294-a422-47b1-a323-82a8ac718bdc" containerName="extract" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.015424 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c3f7294-a422-47b1-a323-82a8ac718bdc" containerName="extract" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.015810 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-6769fb99d-47b86" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.018525 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.028544 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.028544 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-mx4cz" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.045581 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-47b86"] Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.128084 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wt5b\" (UniqueName: \"kubernetes.io/projected/4d1fecfb-765b-420d-b427-1d3b9f5f14f7-kube-api-access-4wt5b\") pod \"nmstate-operator-6769fb99d-47b86\" (UID: \"4d1fecfb-765b-420d-b427-1d3b9f5f14f7\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-47b86" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.229239 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wt5b\" (UniqueName: \"kubernetes.io/projected/4d1fecfb-765b-420d-b427-1d3b9f5f14f7-kube-api-access-4wt5b\") pod \"nmstate-operator-6769fb99d-47b86\" (UID: \"4d1fecfb-765b-420d-b427-1d3b9f5f14f7\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-47b86" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.252446 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wt5b\" (UniqueName: \"kubernetes.io/projected/4d1fecfb-765b-420d-b427-1d3b9f5f14f7-kube-api-access-4wt5b\") pod \"nmstate-operator-6769fb99d-47b86\" (UID: \"4d1fecfb-765b-420d-b427-1d3b9f5f14f7\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-47b86" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.332284 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-6769fb99d-47b86" Jan 05 22:03:44 crc kubenswrapper[4910]: I0105 22:03:44.831521 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-47b86"] Jan 05 22:03:45 crc kubenswrapper[4910]: I0105 22:03:45.706825 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-6769fb99d-47b86" event={"ID":"4d1fecfb-765b-420d-b427-1d3b9f5f14f7","Type":"ContainerStarted","Data":"b0332a228928d601380f715826e9cffe3c57a825755e4946f902d25dcc7ac586"} Jan 05 22:03:47 crc kubenswrapper[4910]: I0105 22:03:47.721296 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-6769fb99d-47b86" event={"ID":"4d1fecfb-765b-420d-b427-1d3b9f5f14f7","Type":"ContainerStarted","Data":"359b30b868de1aa922421ad8149716a2b78db509fabe4f742ff58933b27dae05"} Jan 05 22:03:47 crc kubenswrapper[4910]: I0105 22:03:47.737823 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-6769fb99d-47b86" podStartSLOduration=2.212319651 podStartE2EDuration="4.737797558s" podCreationTimestamp="2026-01-05 22:03:43 +0000 UTC" firstStartedPulling="2026-01-05 22:03:44.845709906 +0000 UTC m=+756.423207566" lastFinishedPulling="2026-01-05 22:03:47.371187793 +0000 UTC m=+758.948685473" observedRunningTime="2026-01-05 22:03:47.735559687 +0000 UTC m=+759.313057357" watchObservedRunningTime="2026-01-05 22:03:47.737797558 +0000 UTC m=+759.315295238" Jan 05 22:03:50 crc kubenswrapper[4910]: I0105 22:03:50.093744 4910 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.805068 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-4vzdw"] Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.806431 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-4vzdw" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.808945 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-bh72q" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.826404 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5"] Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.827284 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.832568 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.840265 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5"] Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.843071 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-4vzdw"] Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.858598 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-ww7rm"] Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.860019 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-ww7rm" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.961854 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zg9zp\" (UniqueName: \"kubernetes.io/projected/d6b13462-1840-44b9-b85a-44a40509a366-kube-api-access-zg9zp\") pod \"nmstate-metrics-7f7f7578db-4vzdw\" (UID: \"d6b13462-1840-44b9-b85a-44a40509a366\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-4vzdw" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.961905 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpxpq\" (UniqueName: \"kubernetes.io/projected/3fc78fdb-7ef9-4185-b690-8a249946e4b9-kube-api-access-rpxpq\") pod \"nmstate-webhook-f8fb84555-gc9w5\" (UID: \"3fc78fdb-7ef9-4185-b690-8a249946e4b9\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.961937 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d75fd82b-afb0-400e-9db1-57e5d187dfbc-dbus-socket\") pod \"nmstate-handler-ww7rm\" (UID: \"d75fd82b-afb0-400e-9db1-57e5d187dfbc\") " pod="openshift-nmstate/nmstate-handler-ww7rm" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.961963 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/3fc78fdb-7ef9-4185-b690-8a249946e4b9-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-gc9w5\" (UID: \"3fc78fdb-7ef9-4185-b690-8a249946e4b9\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.961980 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d75fd82b-afb0-400e-9db1-57e5d187dfbc-nmstate-lock\") pod \"nmstate-handler-ww7rm\" (UID: \"d75fd82b-afb0-400e-9db1-57e5d187dfbc\") " pod="openshift-nmstate/nmstate-handler-ww7rm" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.962002 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7f2dz\" (UniqueName: \"kubernetes.io/projected/d75fd82b-afb0-400e-9db1-57e5d187dfbc-kube-api-access-7f2dz\") pod \"nmstate-handler-ww7rm\" (UID: \"d75fd82b-afb0-400e-9db1-57e5d187dfbc\") " pod="openshift-nmstate/nmstate-handler-ww7rm" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.962280 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d75fd82b-afb0-400e-9db1-57e5d187dfbc-ovs-socket\") pod \"nmstate-handler-ww7rm\" (UID: \"d75fd82b-afb0-400e-9db1-57e5d187dfbc\") " pod="openshift-nmstate/nmstate-handler-ww7rm" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.985721 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj"] Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.986872 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.989592 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.989734 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.995533 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj"] Jan 05 22:03:52 crc kubenswrapper[4910]: I0105 22:03:52.996048 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-jwj2m" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.063393 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zg9zp\" (UniqueName: \"kubernetes.io/projected/d6b13462-1840-44b9-b85a-44a40509a366-kube-api-access-zg9zp\") pod \"nmstate-metrics-7f7f7578db-4vzdw\" (UID: \"d6b13462-1840-44b9-b85a-44a40509a366\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-4vzdw" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.063448 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpxpq\" (UniqueName: \"kubernetes.io/projected/3fc78fdb-7ef9-4185-b690-8a249946e4b9-kube-api-access-rpxpq\") pod \"nmstate-webhook-f8fb84555-gc9w5\" (UID: \"3fc78fdb-7ef9-4185-b690-8a249946e4b9\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.063472 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d75fd82b-afb0-400e-9db1-57e5d187dfbc-dbus-socket\") pod \"nmstate-handler-ww7rm\" (UID: \"d75fd82b-afb0-400e-9db1-57e5d187dfbc\") " pod="openshift-nmstate/nmstate-handler-ww7rm" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.063495 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d75fd82b-afb0-400e-9db1-57e5d187dfbc-nmstate-lock\") pod \"nmstate-handler-ww7rm\" (UID: \"d75fd82b-afb0-400e-9db1-57e5d187dfbc\") " pod="openshift-nmstate/nmstate-handler-ww7rm" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.063514 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/3fc78fdb-7ef9-4185-b690-8a249946e4b9-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-gc9w5\" (UID: \"3fc78fdb-7ef9-4185-b690-8a249946e4b9\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.063532 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7f2dz\" (UniqueName: \"kubernetes.io/projected/d75fd82b-afb0-400e-9db1-57e5d187dfbc-kube-api-access-7f2dz\") pod \"nmstate-handler-ww7rm\" (UID: \"d75fd82b-afb0-400e-9db1-57e5d187dfbc\") " pod="openshift-nmstate/nmstate-handler-ww7rm" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.063552 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d75fd82b-afb0-400e-9db1-57e5d187dfbc-ovs-socket\") pod \"nmstate-handler-ww7rm\" (UID: \"d75fd82b-afb0-400e-9db1-57e5d187dfbc\") " pod="openshift-nmstate/nmstate-handler-ww7rm" Jan 05 
Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.063628 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d75fd82b-afb0-400e-9db1-57e5d187dfbc-ovs-socket\") pod \"nmstate-handler-ww7rm\" (UID: \"d75fd82b-afb0-400e-9db1-57e5d187dfbc\") " pod="openshift-nmstate/nmstate-handler-ww7rm"
Jan 05 22:03:53 crc kubenswrapper[4910]: E0105 22:03:53.064004 4910 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found
Jan 05 22:03:53 crc kubenswrapper[4910]: E0105 22:03:53.064082 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3fc78fdb-7ef9-4185-b690-8a249946e4b9-tls-key-pair podName:3fc78fdb-7ef9-4185-b690-8a249946e4b9 nodeName:}" failed. No retries permitted until 2026-01-05 22:03:53.564059987 +0000 UTC m=+765.141557857 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/3fc78fdb-7ef9-4185-b690-8a249946e4b9-tls-key-pair") pod "nmstate-webhook-f8fb84555-gc9w5" (UID: "3fc78fdb-7ef9-4185-b690-8a249946e4b9") : secret "openshift-nmstate-webhook" not found
Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.064160 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d75fd82b-afb0-400e-9db1-57e5d187dfbc-dbus-socket\") pod \"nmstate-handler-ww7rm\" (UID: \"d75fd82b-afb0-400e-9db1-57e5d187dfbc\") " pod="openshift-nmstate/nmstate-handler-ww7rm"
Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.064286 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d75fd82b-afb0-400e-9db1-57e5d187dfbc-nmstate-lock\") pod \"nmstate-handler-ww7rm\" (UID: \"d75fd82b-afb0-400e-9db1-57e5d187dfbc\") " pod="openshift-nmstate/nmstate-handler-ww7rm"
Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.085752 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7f2dz\" (UniqueName: \"kubernetes.io/projected/d75fd82b-afb0-400e-9db1-57e5d187dfbc-kube-api-access-7f2dz\") pod \"nmstate-handler-ww7rm\" (UID: \"d75fd82b-afb0-400e-9db1-57e5d187dfbc\") " pod="openshift-nmstate/nmstate-handler-ww7rm"
Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.086082 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpxpq\" (UniqueName: \"kubernetes.io/projected/3fc78fdb-7ef9-4185-b690-8a249946e4b9-kube-api-access-rpxpq\") pod \"nmstate-webhook-f8fb84555-gc9w5\" (UID: \"3fc78fdb-7ef9-4185-b690-8a249946e4b9\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5"
Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.101401 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zg9zp\" (UniqueName: \"kubernetes.io/projected/d6b13462-1840-44b9-b85a-44a40509a366-kube-api-access-zg9zp\") pod \"nmstate-metrics-7f7f7578db-4vzdw\" (UID: \"d6b13462-1840-44b9-b85a-44a40509a366\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-4vzdw"
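The tls-key-pair failure above is transient by design: nestedpendingoperations refuses retries for 500ms (durationBeforeRetry), and the mount succeeds at 22:03:53.579522 below once the openshift-nmstate-webhook secret exists. The 500ms figure is consistent with an exponential backoff that starts at 500ms and doubles per failure; the cap used in this sketch is an assumption, not taken from this log:

```go
// backoff.go - illustrates the retry spacing suggested by
// "durationBeforeRetry 500ms" above: 500ms initial delay, doubling per
// failure, clamped at an assumed cap.
package main

import (
	"fmt"
	"time"
)

func main() {
	d := 500 * time.Millisecond
	maxDelay := 2*time.Minute + 2*time.Second // assumed cap, not from this log
	for i := 1; i <= 10; i++ {
		fmt.Printf("retry %d scheduled after %v\n", i, d)
		d *= 2
		if d > maxDelay {
			d = maxDelay
		}
	}
}
```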
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-4vzdw" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.164703 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pztlw\" (UniqueName: \"kubernetes.io/projected/8f07ce10-56ab-4b95-a099-1ab94c960aad-kube-api-access-pztlw\") pod \"nmstate-console-plugin-6ff7998486-m62fj\" (UID: \"8f07ce10-56ab-4b95-a099-1ab94c960aad\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.164768 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/8f07ce10-56ab-4b95-a099-1ab94c960aad-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-m62fj\" (UID: \"8f07ce10-56ab-4b95-a099-1ab94c960aad\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.164805 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/8f07ce10-56ab-4b95-a099-1ab94c960aad-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-m62fj\" (UID: \"8f07ce10-56ab-4b95-a099-1ab94c960aad\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.178844 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-ww7rm" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.198339 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-56dccc5ff8-t4t94"] Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.199450 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.220720 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-56dccc5ff8-t4t94"] Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.266279 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pztlw\" (UniqueName: \"kubernetes.io/projected/8f07ce10-56ab-4b95-a099-1ab94c960aad-kube-api-access-pztlw\") pod \"nmstate-console-plugin-6ff7998486-m62fj\" (UID: \"8f07ce10-56ab-4b95-a099-1ab94c960aad\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.266312 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/8f07ce10-56ab-4b95-a099-1ab94c960aad-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-m62fj\" (UID: \"8f07ce10-56ab-4b95-a099-1ab94c960aad\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.266344 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/8f07ce10-56ab-4b95-a099-1ab94c960aad-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-m62fj\" (UID: \"8f07ce10-56ab-4b95-a099-1ab94c960aad\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.267244 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/8f07ce10-56ab-4b95-a099-1ab94c960aad-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-m62fj\" (UID: \"8f07ce10-56ab-4b95-a099-1ab94c960aad\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.275367 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/8f07ce10-56ab-4b95-a099-1ab94c960aad-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-m62fj\" (UID: \"8f07ce10-56ab-4b95-a099-1ab94c960aad\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.286652 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pztlw\" (UniqueName: \"kubernetes.io/projected/8f07ce10-56ab-4b95-a099-1ab94c960aad-kube-api-access-pztlw\") pod \"nmstate-console-plugin-6ff7998486-m62fj\" (UID: \"8f07ce10-56ab-4b95-a099-1ab94c960aad\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.303150 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" Jan 05 22:03:53 crc kubenswrapper[4910]: W0105 22:03:53.346113 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd6b13462_1840_44b9_b85a_44a40509a366.slice/crio-a1b14a25c4710c38847bbe4a0369b103a4e6d602660a2188c8c229be21c71489 WatchSource:0}: Error finding container a1b14a25c4710c38847bbe4a0369b103a4e6d602660a2188c8c229be21c71489: Status 404 returned error can't find the container with id a1b14a25c4710c38847bbe4a0369b103a4e6d602660a2188c8c229be21c71489 Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.353213 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-4vzdw"] Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.369639 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2870ea67-0782-4f92-8876-ade6e7cd2c1f-oauth-serving-cert\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.369705 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2870ea67-0782-4f92-8876-ade6e7cd2c1f-console-serving-cert\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.369731 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2870ea67-0782-4f92-8876-ade6e7cd2c1f-console-config\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.369901 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2870ea67-0782-4f92-8876-ade6e7cd2c1f-console-oauth-config\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.370046 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2870ea67-0782-4f92-8876-ade6e7cd2c1f-service-ca\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.370142 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24n46\" (UniqueName: \"kubernetes.io/projected/2870ea67-0782-4f92-8876-ade6e7cd2c1f-kube-api-access-24n46\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.370194 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/2870ea67-0782-4f92-8876-ade6e7cd2c1f-trusted-ca-bundle\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.472317 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24n46\" (UniqueName: \"kubernetes.io/projected/2870ea67-0782-4f92-8876-ade6e7cd2c1f-kube-api-access-24n46\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.472368 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2870ea67-0782-4f92-8876-ade6e7cd2c1f-trusted-ca-bundle\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.472405 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2870ea67-0782-4f92-8876-ade6e7cd2c1f-oauth-serving-cert\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.472433 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2870ea67-0782-4f92-8876-ade6e7cd2c1f-console-serving-cert\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.472450 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2870ea67-0782-4f92-8876-ade6e7cd2c1f-console-config\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.472488 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2870ea67-0782-4f92-8876-ade6e7cd2c1f-console-oauth-config\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.472530 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2870ea67-0782-4f92-8876-ade6e7cd2c1f-service-ca\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.473372 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2870ea67-0782-4f92-8876-ade6e7cd2c1f-service-ca\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.474231 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/2870ea67-0782-4f92-8876-ade6e7cd2c1f-console-config\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.474247 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2870ea67-0782-4f92-8876-ade6e7cd2c1f-trusted-ca-bundle\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.476305 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2870ea67-0782-4f92-8876-ade6e7cd2c1f-console-serving-cert\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.476443 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2870ea67-0782-4f92-8876-ade6e7cd2c1f-oauth-serving-cert\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.480547 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2870ea67-0782-4f92-8876-ade6e7cd2c1f-console-oauth-config\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.494602 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24n46\" (UniqueName: \"kubernetes.io/projected/2870ea67-0782-4f92-8876-ade6e7cd2c1f-kube-api-access-24n46\") pod \"console-56dccc5ff8-t4t94\" (UID: \"2870ea67-0782-4f92-8876-ade6e7cd2c1f\") " pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.499999 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj"] Jan 05 22:03:53 crc kubenswrapper[4910]: W0105 22:03:53.501984 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f07ce10_56ab_4b95_a099_1ab94c960aad.slice/crio-49186c2e409bbad355c8337014665bb0eea7b3168173eb9e8c4d64fb335a8bc3 WatchSource:0}: Error finding container 49186c2e409bbad355c8337014665bb0eea7b3168173eb9e8c4d64fb335a8bc3: Status 404 returned error can't find the container with id 49186c2e409bbad355c8337014665bb0eea7b3168173eb9e8c4d64fb335a8bc3 Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.544111 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-56dccc5ff8-t4t94" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.576093 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/3fc78fdb-7ef9-4185-b690-8a249946e4b9-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-gc9w5\" (UID: \"3fc78fdb-7ef9-4185-b690-8a249946e4b9\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.579522 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/3fc78fdb-7ef9-4185-b690-8a249946e4b9-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-gc9w5\" (UID: \"3fc78fdb-7ef9-4185-b690-8a249946e4b9\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.725724 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-56dccc5ff8-t4t94"] Jan 05 22:03:53 crc kubenswrapper[4910]: W0105 22:03:53.727493 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2870ea67_0782_4f92_8876_ade6e7cd2c1f.slice/crio-75334d3095c7baf6766ee5d1f57ddbb75da1b5f0ed299b9431857c13f3c6fac6 WatchSource:0}: Error finding container 75334d3095c7baf6766ee5d1f57ddbb75da1b5f0ed299b9431857c13f3c6fac6: Status 404 returned error can't find the container with id 75334d3095c7baf6766ee5d1f57ddbb75da1b5f0ed299b9431857c13f3c6fac6 Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.741759 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5" Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.776360 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" event={"ID":"8f07ce10-56ab-4b95-a099-1ab94c960aad","Type":"ContainerStarted","Data":"49186c2e409bbad355c8337014665bb0eea7b3168173eb9e8c4d64fb335a8bc3"} Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.777217 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-4vzdw" event={"ID":"d6b13462-1840-44b9-b85a-44a40509a366","Type":"ContainerStarted","Data":"a1b14a25c4710c38847bbe4a0369b103a4e6d602660a2188c8c229be21c71489"} Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.778407 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-ww7rm" event={"ID":"d75fd82b-afb0-400e-9db1-57e5d187dfbc","Type":"ContainerStarted","Data":"8764c3164142ff8bccfede77e38df2f61953d1d8f324a787106fa471e1b14138"} Jan 05 22:03:53 crc kubenswrapper[4910]: I0105 22:03:53.779529 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-56dccc5ff8-t4t94" event={"ID":"2870ea67-0782-4f92-8876-ade6e7cd2c1f","Type":"ContainerStarted","Data":"75334d3095c7baf6766ee5d1f57ddbb75da1b5f0ed299b9431857c13f3c6fac6"} Jan 05 22:03:54 crc kubenswrapper[4910]: I0105 22:03:54.182895 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5"] Jan 05 22:03:54 crc kubenswrapper[4910]: W0105 22:03:54.196628 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3fc78fdb_7ef9_4185_b690_8a249946e4b9.slice/crio-051e60324d3fc2a2b7b527e35cef867693ff667d89212c4043034189511f0695 
Jan 05 22:03:54 crc kubenswrapper[4910]: W0105 22:03:54.196628 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3fc78fdb_7ef9_4185_b690_8a249946e4b9.slice/crio-051e60324d3fc2a2b7b527e35cef867693ff667d89212c4043034189511f0695 WatchSource:0}: Error finding container 051e60324d3fc2a2b7b527e35cef867693ff667d89212c4043034189511f0695: Status 404 returned error can't find the container with id 051e60324d3fc2a2b7b527e35cef867693ff667d89212c4043034189511f0695
Jan 05 22:03:54 crc kubenswrapper[4910]: I0105 22:03:54.789357 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-56dccc5ff8-t4t94" event={"ID":"2870ea67-0782-4f92-8876-ade6e7cd2c1f","Type":"ContainerStarted","Data":"1af6f1a508594112731911236212512df182712ba49ce51eb43361341302b762"}
Jan 05 22:03:54 crc kubenswrapper[4910]: I0105 22:03:54.790814 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5" event={"ID":"3fc78fdb-7ef9-4185-b690-8a249946e4b9","Type":"ContainerStarted","Data":"051e60324d3fc2a2b7b527e35cef867693ff667d89212c4043034189511f0695"}
Jan 05 22:03:56 crc kubenswrapper[4910]: I0105 22:03:56.803999 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-4vzdw" event={"ID":"d6b13462-1840-44b9-b85a-44a40509a366","Type":"ContainerStarted","Data":"24aa7986aa35dae5f3da86db31c703275395e2aeb4b6576b6ecd22e4a48efc6a"}
Jan 05 22:03:56 crc kubenswrapper[4910]: I0105 22:03:56.807102 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5" event={"ID":"3fc78fdb-7ef9-4185-b690-8a249946e4b9","Type":"ContainerStarted","Data":"ab34cb751ff07846a083d05b045a3c9b9255b28e0fc4ab9dce639fc43395a868"}
Jan 05 22:03:56 crc kubenswrapper[4910]: I0105 22:03:56.807414 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5"
Jan 05 22:03:56 crc kubenswrapper[4910]: I0105 22:03:56.808658 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-ww7rm" event={"ID":"d75fd82b-afb0-400e-9db1-57e5d187dfbc","Type":"ContainerStarted","Data":"1430cc2abaa87cd6c7569ee2498c9578d76689435bd62c03d95110ab4fa1553f"}
Jan 05 22:03:56 crc kubenswrapper[4910]: I0105 22:03:56.808788 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-ww7rm"
Jan 05 22:03:56 crc kubenswrapper[4910]: I0105 22:03:56.810858 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" event={"ID":"8f07ce10-56ab-4b95-a099-1ab94c960aad","Type":"ContainerStarted","Data":"b2c6cc449bb6e9740245e1ac4bd7a8ce47230ad8ce9cc674057e9bb1ac78ebb3"}
Jan 05 22:03:56 crc kubenswrapper[4910]: I0105 22:03:56.835614 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-56dccc5ff8-t4t94" podStartSLOduration=3.835583436 podStartE2EDuration="3.835583436s" podCreationTimestamp="2026-01-05 22:03:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:03:54.812484156 +0000 UTC m=+766.389981826" watchObservedRunningTime="2026-01-05 22:03:56.835583436 +0000 UTC m=+768.413081106"
Jan 05 22:03:56 crc kubenswrapper[4910]: I0105 22:03:56.837660 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5" podStartSLOduration=2.522458924 podStartE2EDuration="4.837648053s" podCreationTimestamp="2026-01-05 22:03:52 +0000 UTC" firstStartedPulling="2026-01-05 22:03:54.199645766 +0000 UTC m=+765.777143446" lastFinishedPulling="2026-01-05 22:03:56.514834905 +0000 UTC m=+768.092332575" observedRunningTime="2026-01-05 22:03:56.827898282 +0000 UTC m=+768.405395992" watchObservedRunningTime="2026-01-05 22:03:56.837648053 +0000 UTC m=+768.415145723"
Jan 05 22:03:56 crc kubenswrapper[4910]: I0105 22:03:56.907723 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-m62fj" podStartSLOduration=1.906018092 podStartE2EDuration="4.907698018s" podCreationTimestamp="2026-01-05 22:03:52 +0000 UTC" firstStartedPulling="2026-01-05 22:03:53.504901041 +0000 UTC m=+765.082398711" lastFinishedPulling="2026-01-05 22:03:56.506580967 +0000 UTC m=+768.084078637" observedRunningTime="2026-01-05 22:03:56.893925975 +0000 UTC m=+768.471423645" watchObservedRunningTime="2026-01-05 22:03:56.907698018 +0000 UTC m=+768.485195708"
Jan 05 22:03:56 crc kubenswrapper[4910]: I0105 22:03:56.908457 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-ww7rm" podStartSLOduration=1.625969627 podStartE2EDuration="4.908450845s" podCreationTimestamp="2026-01-05 22:03:52 +0000 UTC" firstStartedPulling="2026-01-05 22:03:53.237272269 +0000 UTC m=+764.814769939" lastFinishedPulling="2026-01-05 22:03:56.519753487 +0000 UTC m=+768.097251157" observedRunningTime="2026-01-05 22:03:56.857568937 +0000 UTC m=+768.435066607" watchObservedRunningTime="2026-01-05 22:03:56.908450845 +0000 UTC m=+768.485948525"
Jan 05 22:03:59 crc kubenswrapper[4910]: I0105 22:03:59.834606 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-4vzdw" event={"ID":"d6b13462-1840-44b9-b85a-44a40509a366","Type":"ContainerStarted","Data":"3e8a3afb22c31f5800fa084b51a4a17163eafbe2b8775006620dfd38f8c40979"}
Jan 05 22:03:59 crc kubenswrapper[4910]: I0105 22:03:59.855545 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-4vzdw" podStartSLOduration=1.8947729660000001 podStartE2EDuration="7.855516778s" podCreationTimestamp="2026-01-05 22:03:52 +0000 UTC" firstStartedPulling="2026-01-05 22:03:53.350787243 +0000 UTC m=+764.928284913" lastFinishedPulling="2026-01-05 22:03:59.311531055 +0000 UTC m=+770.889028725" observedRunningTime="2026-01-05 22:03:59.854092025 +0000 UTC m=+771.431589715" watchObservedRunningTime="2026-01-05 22:03:59.855516778 +0000 UTC m=+771.433014478"
Jan 05 22:04:03 crc kubenswrapper[4910]: I0105 22:04:03.213852 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-ww7rm"
Jan 05 22:04:03 crc kubenswrapper[4910]: I0105 22:04:03.544920 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-56dccc5ff8-t4t94"
Jan 05 22:04:03 crc kubenswrapper[4910]: I0105 22:04:03.545009 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-56dccc5ff8-t4t94"
Jan 05 22:04:03 crc kubenswrapper[4910]: I0105 22:04:03.552757 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-56dccc5ff8-t4t94"
Jan 05 22:04:03 crc kubenswrapper[4910]: I0105 22:04:03.867880 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-56dccc5ff8-t4t94"
Jan 05 22:04:03 crc kubenswrapper[4910]: I0105 22:04:03.928853 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-g5xxj"]
Jan 05 22:04:10 crc kubenswrapper[4910]: I0105 22:04:10.952268 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 05 22:04:10 crc kubenswrapper[4910]: I0105 22:04:10.952841 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 05 22:04:13 crc kubenswrapper[4910]: I0105 22:04:13.751963 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-f8fb84555-gc9w5"
Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.224730 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx"]
Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.226907 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx"
Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.229374 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.242614 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx"]
Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.290458 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/14994d1c-8f9e-4eab-b9fe-994f9910317b-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx\" (UID: \"14994d1c-8f9e-4eab-b9fe-994f9910317b\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx"
Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.290544 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqnd4\" (UniqueName: \"kubernetes.io/projected/14994d1c-8f9e-4eab-b9fe-994f9910317b-kube-api-access-vqnd4\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx\" (UID: \"14994d1c-8f9e-4eab-b9fe-994f9910317b\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx"
Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.290718 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/14994d1c-8f9e-4eab-b9fe-994f9910317b-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx\" (UID: \"14994d1c-8f9e-4eab-b9fe-994f9910317b\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx"
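The machine-config-daemon liveness failure above ("connection refused" on 127.0.0.1:8798) means nothing was listening on the health port at that instant; a single failure stays under the default failureThreshold of 3, which fits the absence of any restart later in this log. The probe itself is just an HTTP GET with a short timeout; a generic equivalent — port and path taken from the log, everything else a sketch, not kubelet's prober:

```go
// healthprobe.go - an HTTP liveness-style check against the endpoint named
// in the failed probe above. Illustrative only.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 1 * time.Second} // kubelet's default probe timeout is 1s
	resp, err := client.Get("http://127.0.0.1:8798/health")
	if err != nil {
		fmt.Println("probe failure:", err) // e.g. connect: connection refused, as above
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		fmt.Println("probe success:", resp.Status) // kubelet treats 200-399 as success
	} else {
		fmt.Println("probe failure:", resp.Status)
	}
}
```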
" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.392215 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/14994d1c-8f9e-4eab-b9fe-994f9910317b-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx\" (UID: \"14994d1c-8f9e-4eab-b9fe-994f9910317b\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.392242 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqnd4\" (UniqueName: \"kubernetes.io/projected/14994d1c-8f9e-4eab-b9fe-994f9910317b-kube-api-access-vqnd4\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx\" (UID: \"14994d1c-8f9e-4eab-b9fe-994f9910317b\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.393414 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/14994d1c-8f9e-4eab-b9fe-994f9910317b-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx\" (UID: \"14994d1c-8f9e-4eab-b9fe-994f9910317b\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.393438 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/14994d1c-8f9e-4eab-b9fe-994f9910317b-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx\" (UID: \"14994d1c-8f9e-4eab-b9fe-994f9910317b\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.413678 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqnd4\" (UniqueName: \"kubernetes.io/projected/14994d1c-8f9e-4eab-b9fe-994f9910317b-kube-api-access-vqnd4\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx\" (UID: \"14994d1c-8f9e-4eab-b9fe-994f9910317b\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.547098 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" Jan 05 22:04:26 crc kubenswrapper[4910]: I0105 22:04:26.799175 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx"] Jan 05 22:04:27 crc kubenswrapper[4910]: I0105 22:04:27.016381 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" event={"ID":"14994d1c-8f9e-4eab-b9fe-994f9910317b","Type":"ContainerStarted","Data":"db2683fef0e917a7937877b21c0105c749642e02d4bd1f3aeb7cd3ccea6d8043"} Jan 05 22:04:28 crc kubenswrapper[4910]: I0105 22:04:28.030356 4910 generic.go:334] "Generic (PLEG): container finished" podID="14994d1c-8f9e-4eab-b9fe-994f9910317b" containerID="82015a7e1800e55185ee09c7d50b334ce80d5b49328afbf4bd31baa441153c06" exitCode=0 Jan 05 22:04:28 crc kubenswrapper[4910]: I0105 22:04:28.030407 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" event={"ID":"14994d1c-8f9e-4eab-b9fe-994f9910317b","Type":"ContainerDied","Data":"82015a7e1800e55185ee09c7d50b334ce80d5b49328afbf4bd31baa441153c06"} Jan 05 22:04:28 crc kubenswrapper[4910]: I0105 22:04:28.988493 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-g5xxj" podUID="8cef9cdb-d8f8-406b-8575-6a6d1b72a638" containerName="console" containerID="cri-o://b74bab86f9cf34833f34affea1f044a4bfa513efc09a52b967914d96933b751c" gracePeriod=15 Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.413525 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-g5xxj_8cef9cdb-d8f8-406b-8575-6a6d1b72a638/console/0.log" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.413992 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.541095 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-trusted-ca-bundle\") pod \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.541701 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-config\") pod \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.541952 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-oauth-config\") pod \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.542218 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-service-ca\") pod \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.542436 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bwx5\" (UniqueName: \"kubernetes.io/projected/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-kube-api-access-4bwx5\") pod \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.542648 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-serving-cert\") pod \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.542887 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-oauth-serving-cert\") pod \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\" (UID: \"8cef9cdb-d8f8-406b-8575-6a6d1b72a638\") " Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.542507 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-config" (OuterVolumeSpecName: "console-config") pod "8cef9cdb-d8f8-406b-8575-6a6d1b72a638" (UID: "8cef9cdb-d8f8-406b-8575-6a6d1b72a638"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.543070 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-service-ca" (OuterVolumeSpecName: "service-ca") pod "8cef9cdb-d8f8-406b-8575-6a6d1b72a638" (UID: "8cef9cdb-d8f8-406b-8575-6a6d1b72a638"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.542721 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "8cef9cdb-d8f8-406b-8575-6a6d1b72a638" (UID: "8cef9cdb-d8f8-406b-8575-6a6d1b72a638"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.543839 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.544035 4910 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.544241 4910 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-service-ca\") on node \"crc\" DevicePath \"\"" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.544026 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "8cef9cdb-d8f8-406b-8575-6a6d1b72a638" (UID: "8cef9cdb-d8f8-406b-8575-6a6d1b72a638"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.555458 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-kube-api-access-4bwx5" (OuterVolumeSpecName: "kube-api-access-4bwx5") pod "8cef9cdb-d8f8-406b-8575-6a6d1b72a638" (UID: "8cef9cdb-d8f8-406b-8575-6a6d1b72a638"). InnerVolumeSpecName "kube-api-access-4bwx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.556167 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "8cef9cdb-d8f8-406b-8575-6a6d1b72a638" (UID: "8cef9cdb-d8f8-406b-8575-6a6d1b72a638"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.559293 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "8cef9cdb-d8f8-406b-8575-6a6d1b72a638" (UID: "8cef9cdb-d8f8-406b-8575-6a6d1b72a638"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.565964 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-k44j8"] Jan 05 22:04:29 crc kubenswrapper[4910]: E0105 22:04:29.566306 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cef9cdb-d8f8-406b-8575-6a6d1b72a638" containerName="console" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.566325 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cef9cdb-d8f8-406b-8575-6a6d1b72a638" containerName="console" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.566800 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cef9cdb-d8f8-406b-8575-6a6d1b72a638" containerName="console" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.568865 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.590405 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-k44j8"] Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.652980 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0530f826-c983-428a-9263-ce4c30dc8185-utilities\") pod \"redhat-operators-k44j8\" (UID: \"0530f826-c983-428a-9263-ce4c30dc8185\") " pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.653050 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dqlg\" (UniqueName: \"kubernetes.io/projected/0530f826-c983-428a-9263-ce4c30dc8185-kube-api-access-9dqlg\") pod \"redhat-operators-k44j8\" (UID: \"0530f826-c983-428a-9263-ce4c30dc8185\") " pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.653072 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0530f826-c983-428a-9263-ce4c30dc8185-catalog-content\") pod \"redhat-operators-k44j8\" (UID: \"0530f826-c983-428a-9263-ce4c30dc8185\") " pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.653170 4910 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.653183 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bwx5\" (UniqueName: \"kubernetes.io/projected/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-kube-api-access-4bwx5\") on node \"crc\" DevicePath \"\"" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.653194 4910 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.653203 4910 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8cef9cdb-d8f8-406b-8575-6a6d1b72a638-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 
22:04:29.753640 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0530f826-c983-428a-9263-ce4c30dc8185-utilities\") pod \"redhat-operators-k44j8\" (UID: \"0530f826-c983-428a-9263-ce4c30dc8185\") " pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.753716 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dqlg\" (UniqueName: \"kubernetes.io/projected/0530f826-c983-428a-9263-ce4c30dc8185-kube-api-access-9dqlg\") pod \"redhat-operators-k44j8\" (UID: \"0530f826-c983-428a-9263-ce4c30dc8185\") " pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.753745 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0530f826-c983-428a-9263-ce4c30dc8185-catalog-content\") pod \"redhat-operators-k44j8\" (UID: \"0530f826-c983-428a-9263-ce4c30dc8185\") " pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.754206 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0530f826-c983-428a-9263-ce4c30dc8185-catalog-content\") pod \"redhat-operators-k44j8\" (UID: \"0530f826-c983-428a-9263-ce4c30dc8185\") " pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.754468 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0530f826-c983-428a-9263-ce4c30dc8185-utilities\") pod \"redhat-operators-k44j8\" (UID: \"0530f826-c983-428a-9263-ce4c30dc8185\") " pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.774217 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dqlg\" (UniqueName: \"kubernetes.io/projected/0530f826-c983-428a-9263-ce4c30dc8185-kube-api-access-9dqlg\") pod \"redhat-operators-k44j8\" (UID: \"0530f826-c983-428a-9263-ce4c30dc8185\") " pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:29 crc kubenswrapper[4910]: I0105 22:04:29.953800 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.052004 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-g5xxj_8cef9cdb-d8f8-406b-8575-6a6d1b72a638/console/0.log" Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.052540 4910 generic.go:334] "Generic (PLEG): container finished" podID="8cef9cdb-d8f8-406b-8575-6a6d1b72a638" containerID="b74bab86f9cf34833f34affea1f044a4bfa513efc09a52b967914d96933b751c" exitCode=2 Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.052701 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-g5xxj" Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.052724 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-g5xxj" event={"ID":"8cef9cdb-d8f8-406b-8575-6a6d1b72a638","Type":"ContainerDied","Data":"b74bab86f9cf34833f34affea1f044a4bfa513efc09a52b967914d96933b751c"} Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.052811 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-g5xxj" event={"ID":"8cef9cdb-d8f8-406b-8575-6a6d1b72a638","Type":"ContainerDied","Data":"9aad3e13781f0506c1503b0336bb38868f3c6f8405d163e0b2c55e5c947968b6"} Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.052840 4910 scope.go:117] "RemoveContainer" containerID="b74bab86f9cf34833f34affea1f044a4bfa513efc09a52b967914d96933b751c" Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.059276 4910 generic.go:334] "Generic (PLEG): container finished" podID="14994d1c-8f9e-4eab-b9fe-994f9910317b" containerID="d894abffaa8bbaf3e7f0fd492f877e5308f7902963739ea4fdc720d385f54c90" exitCode=0 Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.059342 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" event={"ID":"14994d1c-8f9e-4eab-b9fe-994f9910317b","Type":"ContainerDied","Data":"d894abffaa8bbaf3e7f0fd492f877e5308f7902963739ea4fdc720d385f54c90"} Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.108177 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-g5xxj"] Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.111861 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-g5xxj"] Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.113382 4910 scope.go:117] "RemoveContainer" containerID="b74bab86f9cf34833f34affea1f044a4bfa513efc09a52b967914d96933b751c" Jan 05 22:04:30 crc kubenswrapper[4910]: E0105 22:04:30.114197 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b74bab86f9cf34833f34affea1f044a4bfa513efc09a52b967914d96933b751c\": container with ID starting with b74bab86f9cf34833f34affea1f044a4bfa513efc09a52b967914d96933b751c not found: ID does not exist" containerID="b74bab86f9cf34833f34affea1f044a4bfa513efc09a52b967914d96933b751c" Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.114240 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b74bab86f9cf34833f34affea1f044a4bfa513efc09a52b967914d96933b751c"} err="failed to get container status \"b74bab86f9cf34833f34affea1f044a4bfa513efc09a52b967914d96933b751c\": rpc error: code = NotFound desc = could not find container \"b74bab86f9cf34833f34affea1f044a4bfa513efc09a52b967914d96933b751c\": container with ID starting with b74bab86f9cf34833f34affea1f044a4bfa513efc09a52b967914d96933b751c not found: ID does not exist" Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.214561 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-k44j8"] Jan 05 22:04:30 crc kubenswrapper[4910]: I0105 22:04:30.733145 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cef9cdb-d8f8-406b-8575-6a6d1b72a638" path="/var/lib/kubelet/pods/8cef9cdb-d8f8-406b-8575-6a6d1b72a638/volumes" Jan 05 22:04:31 crc kubenswrapper[4910]: I0105 22:04:31.068198 
4910 generic.go:334] "Generic (PLEG): container finished" podID="14994d1c-8f9e-4eab-b9fe-994f9910317b" containerID="ebd39fe437d524e3c8952bec2e09a87ec94c5c6d66db848ad5a0c5796ac74a5f" exitCode=0 Jan 05 22:04:31 crc kubenswrapper[4910]: I0105 22:04:31.068292 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" event={"ID":"14994d1c-8f9e-4eab-b9fe-994f9910317b","Type":"ContainerDied","Data":"ebd39fe437d524e3c8952bec2e09a87ec94c5c6d66db848ad5a0c5796ac74a5f"} Jan 05 22:04:31 crc kubenswrapper[4910]: I0105 22:04:31.070562 4910 generic.go:334] "Generic (PLEG): container finished" podID="0530f826-c983-428a-9263-ce4c30dc8185" containerID="625fdacb9f44a0cbc64bc409f45d450c0de055dfa6987e2d83e8a0bf10bb88f0" exitCode=0 Jan 05 22:04:31 crc kubenswrapper[4910]: I0105 22:04:31.070669 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k44j8" event={"ID":"0530f826-c983-428a-9263-ce4c30dc8185","Type":"ContainerDied","Data":"625fdacb9f44a0cbc64bc409f45d450c0de055dfa6987e2d83e8a0bf10bb88f0"} Jan 05 22:04:31 crc kubenswrapper[4910]: I0105 22:04:31.070690 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k44j8" event={"ID":"0530f826-c983-428a-9263-ce4c30dc8185","Type":"ContainerStarted","Data":"346d3a57c397c39e0f078b9b6ed7b9e78f62c72e5c3e833aa8899bb5fd28ea75"} Jan 05 22:04:32 crc kubenswrapper[4910]: I0105 22:04:32.303084 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" Jan 05 22:04:32 crc kubenswrapper[4910]: I0105 22:04:32.492513 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqnd4\" (UniqueName: \"kubernetes.io/projected/14994d1c-8f9e-4eab-b9fe-994f9910317b-kube-api-access-vqnd4\") pod \"14994d1c-8f9e-4eab-b9fe-994f9910317b\" (UID: \"14994d1c-8f9e-4eab-b9fe-994f9910317b\") " Jan 05 22:04:32 crc kubenswrapper[4910]: I0105 22:04:32.492888 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/14994d1c-8f9e-4eab-b9fe-994f9910317b-util\") pod \"14994d1c-8f9e-4eab-b9fe-994f9910317b\" (UID: \"14994d1c-8f9e-4eab-b9fe-994f9910317b\") " Jan 05 22:04:32 crc kubenswrapper[4910]: I0105 22:04:32.492929 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/14994d1c-8f9e-4eab-b9fe-994f9910317b-bundle\") pod \"14994d1c-8f9e-4eab-b9fe-994f9910317b\" (UID: \"14994d1c-8f9e-4eab-b9fe-994f9910317b\") " Jan 05 22:04:32 crc kubenswrapper[4910]: I0105 22:04:32.493834 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14994d1c-8f9e-4eab-b9fe-994f9910317b-bundle" (OuterVolumeSpecName: "bundle") pod "14994d1c-8f9e-4eab-b9fe-994f9910317b" (UID: "14994d1c-8f9e-4eab-b9fe-994f9910317b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:04:32 crc kubenswrapper[4910]: I0105 22:04:32.498298 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14994d1c-8f9e-4eab-b9fe-994f9910317b-kube-api-access-vqnd4" (OuterVolumeSpecName: "kube-api-access-vqnd4") pod "14994d1c-8f9e-4eab-b9fe-994f9910317b" (UID: "14994d1c-8f9e-4eab-b9fe-994f9910317b"). InnerVolumeSpecName "kube-api-access-vqnd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:04:32 crc kubenswrapper[4910]: I0105 22:04:32.506988 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14994d1c-8f9e-4eab-b9fe-994f9910317b-util" (OuterVolumeSpecName: "util") pod "14994d1c-8f9e-4eab-b9fe-994f9910317b" (UID: "14994d1c-8f9e-4eab-b9fe-994f9910317b"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:04:32 crc kubenswrapper[4910]: I0105 22:04:32.594354 4910 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/14994d1c-8f9e-4eab-b9fe-994f9910317b-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:04:32 crc kubenswrapper[4910]: I0105 22:04:32.594396 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqnd4\" (UniqueName: \"kubernetes.io/projected/14994d1c-8f9e-4eab-b9fe-994f9910317b-kube-api-access-vqnd4\") on node \"crc\" DevicePath \"\"" Jan 05 22:04:32 crc kubenswrapper[4910]: I0105 22:04:32.594406 4910 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/14994d1c-8f9e-4eab-b9fe-994f9910317b-util\") on node \"crc\" DevicePath \"\"" Jan 05 22:04:33 crc kubenswrapper[4910]: I0105 22:04:33.099614 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" event={"ID":"14994d1c-8f9e-4eab-b9fe-994f9910317b","Type":"ContainerDied","Data":"db2683fef0e917a7937877b21c0105c749642e02d4bd1f3aeb7cd3ccea6d8043"} Jan 05 22:04:33 crc kubenswrapper[4910]: I0105 22:04:33.099764 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db2683fef0e917a7937877b21c0105c749642e02d4bd1f3aeb7cd3ccea6d8043" Jan 05 22:04:33 crc kubenswrapper[4910]: I0105 22:04:33.099693 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx" Jan 05 22:04:33 crc kubenswrapper[4910]: I0105 22:04:33.105157 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k44j8" event={"ID":"0530f826-c983-428a-9263-ce4c30dc8185","Type":"ContainerStarted","Data":"cc8944a6e80a9a106bf7fbb285937a0abc4e1eeb5cb4cede53eee0f51ad9d672"} Jan 05 22:04:34 crc kubenswrapper[4910]: I0105 22:04:34.112493 4910 generic.go:334] "Generic (PLEG): container finished" podID="0530f826-c983-428a-9263-ce4c30dc8185" containerID="cc8944a6e80a9a106bf7fbb285937a0abc4e1eeb5cb4cede53eee0f51ad9d672" exitCode=0 Jan 05 22:04:34 crc kubenswrapper[4910]: I0105 22:04:34.112593 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k44j8" event={"ID":"0530f826-c983-428a-9263-ce4c30dc8185","Type":"ContainerDied","Data":"cc8944a6e80a9a106bf7fbb285937a0abc4e1eeb5cb4cede53eee0f51ad9d672"} Jan 05 22:04:37 crc kubenswrapper[4910]: I0105 22:04:37.133042 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k44j8" event={"ID":"0530f826-c983-428a-9263-ce4c30dc8185","Type":"ContainerStarted","Data":"b2006042a52e069819131ca6a03c710ef7d62fc448f84ee859300fd041c55ddb"} Jan 05 22:04:37 crc kubenswrapper[4910]: I0105 22:04:37.148664 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-k44j8" podStartSLOduration=3.329550763 podStartE2EDuration="8.148640581s" podCreationTimestamp="2026-01-05 22:04:29 +0000 UTC" firstStartedPulling="2026-01-05 22:04:31.072709791 +0000 UTC m=+802.650207481" lastFinishedPulling="2026-01-05 22:04:35.891799619 +0000 UTC m=+807.469297299" observedRunningTime="2026-01-05 22:04:37.148073407 +0000 UTC m=+808.725571077" watchObservedRunningTime="2026-01-05 22:04:37.148640581 +0000 UTC m=+808.726138251" Jan 05 22:04:39 crc kubenswrapper[4910]: I0105 22:04:39.954299 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:39 crc kubenswrapper[4910]: I0105 22:04:39.955796 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:40 crc kubenswrapper[4910]: I0105 22:04:40.952077 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:04:40 crc kubenswrapper[4910]: I0105 22:04:40.952664 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:04:41 crc kubenswrapper[4910]: I0105 22:04:41.005182 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-k44j8" podUID="0530f826-c983-428a-9263-ce4c30dc8185" containerName="registry-server" probeResult="failure" output=< Jan 05 22:04:41 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Jan 05 22:04:41 crc kubenswrapper[4910]: > Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 
22:04:42.249315 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf"] Jan 05 22:04:42 crc kubenswrapper[4910]: E0105 22:04:42.249661 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14994d1c-8f9e-4eab-b9fe-994f9910317b" containerName="extract" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.249678 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="14994d1c-8f9e-4eab-b9fe-994f9910317b" containerName="extract" Jan 05 22:04:42 crc kubenswrapper[4910]: E0105 22:04:42.249695 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14994d1c-8f9e-4eab-b9fe-994f9910317b" containerName="util" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.249703 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="14994d1c-8f9e-4eab-b9fe-994f9910317b" containerName="util" Jan 05 22:04:42 crc kubenswrapper[4910]: E0105 22:04:42.249719 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14994d1c-8f9e-4eab-b9fe-994f9910317b" containerName="pull" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.249727 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="14994d1c-8f9e-4eab-b9fe-994f9910317b" containerName="pull" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.249856 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="14994d1c-8f9e-4eab-b9fe-994f9910317b" containerName="extract" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.250444 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.255183 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.255448 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.255468 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.255802 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.257399 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-fznx2" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.275584 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf"] Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.327663 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1dbbe994-4065-4184-868f-98d333741069-apiservice-cert\") pod \"metallb-operator-controller-manager-74d48df479-fzsxf\" (UID: \"1dbbe994-4065-4184-868f-98d333741069\") " pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.327719 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1dbbe994-4065-4184-868f-98d333741069-webhook-cert\") pod 
\"metallb-operator-controller-manager-74d48df479-fzsxf\" (UID: \"1dbbe994-4065-4184-868f-98d333741069\") " pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.327748 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srxm4\" (UniqueName: \"kubernetes.io/projected/1dbbe994-4065-4184-868f-98d333741069-kube-api-access-srxm4\") pod \"metallb-operator-controller-manager-74d48df479-fzsxf\" (UID: \"1dbbe994-4065-4184-868f-98d333741069\") " pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.428865 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1dbbe994-4065-4184-868f-98d333741069-apiservice-cert\") pod \"metallb-operator-controller-manager-74d48df479-fzsxf\" (UID: \"1dbbe994-4065-4184-868f-98d333741069\") " pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.428930 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1dbbe994-4065-4184-868f-98d333741069-webhook-cert\") pod \"metallb-operator-controller-manager-74d48df479-fzsxf\" (UID: \"1dbbe994-4065-4184-868f-98d333741069\") " pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.428960 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srxm4\" (UniqueName: \"kubernetes.io/projected/1dbbe994-4065-4184-868f-98d333741069-kube-api-access-srxm4\") pod \"metallb-operator-controller-manager-74d48df479-fzsxf\" (UID: \"1dbbe994-4065-4184-868f-98d333741069\") " pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.436972 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1dbbe994-4065-4184-868f-98d333741069-webhook-cert\") pod \"metallb-operator-controller-manager-74d48df479-fzsxf\" (UID: \"1dbbe994-4065-4184-868f-98d333741069\") " pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.437459 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1dbbe994-4065-4184-868f-98d333741069-apiservice-cert\") pod \"metallb-operator-controller-manager-74d48df479-fzsxf\" (UID: \"1dbbe994-4065-4184-868f-98d333741069\") " pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.446405 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srxm4\" (UniqueName: \"kubernetes.io/projected/1dbbe994-4065-4184-868f-98d333741069-kube-api-access-srxm4\") pod \"metallb-operator-controller-manager-74d48df479-fzsxf\" (UID: \"1dbbe994-4065-4184-868f-98d333741069\") " pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.567557 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.722229 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm"] Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.723569 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.727988 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.728188 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.728382 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-r9hgf" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.749098 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm"] Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.836967 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6c442f78-8c84-44eb-851c-836a2473aea7-apiservice-cert\") pod \"metallb-operator-webhook-server-b5f859b96-ztvsm\" (UID: \"6c442f78-8c84-44eb-851c-836a2473aea7\") " pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.837057 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s826b\" (UniqueName: \"kubernetes.io/projected/6c442f78-8c84-44eb-851c-836a2473aea7-kube-api-access-s826b\") pod \"metallb-operator-webhook-server-b5f859b96-ztvsm\" (UID: \"6c442f78-8c84-44eb-851c-836a2473aea7\") " pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.837107 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6c442f78-8c84-44eb-851c-836a2473aea7-webhook-cert\") pod \"metallb-operator-webhook-server-b5f859b96-ztvsm\" (UID: \"6c442f78-8c84-44eb-851c-836a2473aea7\") " pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.938771 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6c442f78-8c84-44eb-851c-836a2473aea7-webhook-cert\") pod \"metallb-operator-webhook-server-b5f859b96-ztvsm\" (UID: \"6c442f78-8c84-44eb-851c-836a2473aea7\") " pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.938863 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6c442f78-8c84-44eb-851c-836a2473aea7-apiservice-cert\") pod \"metallb-operator-webhook-server-b5f859b96-ztvsm\" (UID: \"6c442f78-8c84-44eb-851c-836a2473aea7\") " pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.938896 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-s826b\" (UniqueName: \"kubernetes.io/projected/6c442f78-8c84-44eb-851c-836a2473aea7-kube-api-access-s826b\") pod \"metallb-operator-webhook-server-b5f859b96-ztvsm\" (UID: \"6c442f78-8c84-44eb-851c-836a2473aea7\") " pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.947271 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6c442f78-8c84-44eb-851c-836a2473aea7-apiservice-cert\") pod \"metallb-operator-webhook-server-b5f859b96-ztvsm\" (UID: \"6c442f78-8c84-44eb-851c-836a2473aea7\") " pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.947347 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6c442f78-8c84-44eb-851c-836a2473aea7-webhook-cert\") pod \"metallb-operator-webhook-server-b5f859b96-ztvsm\" (UID: \"6c442f78-8c84-44eb-851c-836a2473aea7\") " pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" Jan 05 22:04:42 crc kubenswrapper[4910]: I0105 22:04:42.979435 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s826b\" (UniqueName: \"kubernetes.io/projected/6c442f78-8c84-44eb-851c-836a2473aea7-kube-api-access-s826b\") pod \"metallb-operator-webhook-server-b5f859b96-ztvsm\" (UID: \"6c442f78-8c84-44eb-851c-836a2473aea7\") " pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" Jan 05 22:04:43 crc kubenswrapper[4910]: I0105 22:04:43.025916 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf"] Jan 05 22:04:43 crc kubenswrapper[4910]: W0105 22:04:43.034415 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1dbbe994_4065_4184_868f_98d333741069.slice/crio-5aaec882f559a5332b9010bec6e5f60294692d82cc9bcaf0b6f94c2ff573aa44 WatchSource:0}: Error finding container 5aaec882f559a5332b9010bec6e5f60294692d82cc9bcaf0b6f94c2ff573aa44: Status 404 returned error can't find the container with id 5aaec882f559a5332b9010bec6e5f60294692d82cc9bcaf0b6f94c2ff573aa44 Jan 05 22:04:43 crc kubenswrapper[4910]: I0105 22:04:43.078424 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" Jan 05 22:04:43 crc kubenswrapper[4910]: I0105 22:04:43.171835 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" event={"ID":"1dbbe994-4065-4184-868f-98d333741069","Type":"ContainerStarted","Data":"5aaec882f559a5332b9010bec6e5f60294692d82cc9bcaf0b6f94c2ff573aa44"} Jan 05 22:04:43 crc kubenswrapper[4910]: I0105 22:04:43.342745 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm"] Jan 05 22:04:43 crc kubenswrapper[4910]: W0105 22:04:43.350035 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c442f78_8c84_44eb_851c_836a2473aea7.slice/crio-b2bd99c26714a256409eb086f96f80e79d286bee6913dfe9f7e115d9c15ccce3 WatchSource:0}: Error finding container b2bd99c26714a256409eb086f96f80e79d286bee6913dfe9f7e115d9c15ccce3: Status 404 returned error can't find the container with id b2bd99c26714a256409eb086f96f80e79d286bee6913dfe9f7e115d9c15ccce3 Jan 05 22:04:44 crc kubenswrapper[4910]: I0105 22:04:44.178899 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" event={"ID":"6c442f78-8c84-44eb-851c-836a2473aea7","Type":"ContainerStarted","Data":"b2bd99c26714a256409eb086f96f80e79d286bee6913dfe9f7e115d9c15ccce3"} Jan 05 22:04:49 crc kubenswrapper[4910]: I0105 22:04:49.209338 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" event={"ID":"1dbbe994-4065-4184-868f-98d333741069","Type":"ContainerStarted","Data":"50ffb8f9f8401ee17cd7e1a2ed11b65a5860bf5d1a166ef63f5341f82cbb0381"} Jan 05 22:04:49 crc kubenswrapper[4910]: I0105 22:04:49.209983 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" Jan 05 22:04:49 crc kubenswrapper[4910]: I0105 22:04:49.233418 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" podStartSLOduration=2.019518258 podStartE2EDuration="7.233396777s" podCreationTimestamp="2026-01-05 22:04:42 +0000 UTC" firstStartedPulling="2026-01-05 22:04:43.037724368 +0000 UTC m=+814.615230748" lastFinishedPulling="2026-01-05 22:04:48.251611597 +0000 UTC m=+819.829109267" observedRunningTime="2026-01-05 22:04:49.2278336 +0000 UTC m=+820.805331280" watchObservedRunningTime="2026-01-05 22:04:49.233396777 +0000 UTC m=+820.810894447" Jan 05 22:04:49 crc kubenswrapper[4910]: I0105 22:04:49.994470 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:50 crc kubenswrapper[4910]: I0105 22:04:50.041856 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:50 crc kubenswrapper[4910]: I0105 22:04:50.237441 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-k44j8"] Jan 05 22:04:51 crc kubenswrapper[4910]: I0105 22:04:51.222145 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-k44j8" podUID="0530f826-c983-428a-9263-ce4c30dc8185" containerName="registry-server" 
containerID="cri-o://b2006042a52e069819131ca6a03c710ef7d62fc448f84ee859300fd041c55ddb" gracePeriod=2 Jan 05 22:04:52 crc kubenswrapper[4910]: I0105 22:04:52.239485 4910 generic.go:334] "Generic (PLEG): container finished" podID="0530f826-c983-428a-9263-ce4c30dc8185" containerID="b2006042a52e069819131ca6a03c710ef7d62fc448f84ee859300fd041c55ddb" exitCode=0 Jan 05 22:04:52 crc kubenswrapper[4910]: I0105 22:04:52.239727 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k44j8" event={"ID":"0530f826-c983-428a-9263-ce4c30dc8185","Type":"ContainerDied","Data":"b2006042a52e069819131ca6a03c710ef7d62fc448f84ee859300fd041c55ddb"} Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.238397 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.247755 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k44j8" event={"ID":"0530f826-c983-428a-9263-ce4c30dc8185","Type":"ContainerDied","Data":"346d3a57c397c39e0f078b9b6ed7b9e78f62c72e5c3e833aa8899bb5fd28ea75"} Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.247799 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k44j8" Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.247819 4910 scope.go:117] "RemoveContainer" containerID="b2006042a52e069819131ca6a03c710ef7d62fc448f84ee859300fd041c55ddb" Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.278322 4910 scope.go:117] "RemoveContainer" containerID="cc8944a6e80a9a106bf7fbb285937a0abc4e1eeb5cb4cede53eee0f51ad9d672" Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.307479 4910 scope.go:117] "RemoveContainer" containerID="625fdacb9f44a0cbc64bc409f45d450c0de055dfa6987e2d83e8a0bf10bb88f0" Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.401738 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0530f826-c983-428a-9263-ce4c30dc8185-utilities\") pod \"0530f826-c983-428a-9263-ce4c30dc8185\" (UID: \"0530f826-c983-428a-9263-ce4c30dc8185\") " Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.402050 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dqlg\" (UniqueName: \"kubernetes.io/projected/0530f826-c983-428a-9263-ce4c30dc8185-kube-api-access-9dqlg\") pod \"0530f826-c983-428a-9263-ce4c30dc8185\" (UID: \"0530f826-c983-428a-9263-ce4c30dc8185\") " Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.402110 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0530f826-c983-428a-9263-ce4c30dc8185-catalog-content\") pod \"0530f826-c983-428a-9263-ce4c30dc8185\" (UID: \"0530f826-c983-428a-9263-ce4c30dc8185\") " Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.403435 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0530f826-c983-428a-9263-ce4c30dc8185-utilities" (OuterVolumeSpecName: "utilities") pod "0530f826-c983-428a-9263-ce4c30dc8185" (UID: "0530f826-c983-428a-9263-ce4c30dc8185"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.412183 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0530f826-c983-428a-9263-ce4c30dc8185-kube-api-access-9dqlg" (OuterVolumeSpecName: "kube-api-access-9dqlg") pod "0530f826-c983-428a-9263-ce4c30dc8185" (UID: "0530f826-c983-428a-9263-ce4c30dc8185"). InnerVolumeSpecName "kube-api-access-9dqlg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.504055 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0530f826-c983-428a-9263-ce4c30dc8185-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.504106 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dqlg\" (UniqueName: \"kubernetes.io/projected/0530f826-c983-428a-9263-ce4c30dc8185-kube-api-access-9dqlg\") on node \"crc\" DevicePath \"\"" Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.516671 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0530f826-c983-428a-9263-ce4c30dc8185-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0530f826-c983-428a-9263-ce4c30dc8185" (UID: "0530f826-c983-428a-9263-ce4c30dc8185"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.573811 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-k44j8"] Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.582463 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-k44j8"] Jan 05 22:04:53 crc kubenswrapper[4910]: I0105 22:04:53.605190 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0530f826-c983-428a-9263-ce4c30dc8185-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:04:54 crc kubenswrapper[4910]: I0105 22:04:54.256779 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" event={"ID":"6c442f78-8c84-44eb-851c-836a2473aea7","Type":"ContainerStarted","Data":"fca0147417c21c981f8106647861e1bd2b032f0d28d74f851abe0c67748c1791"} Jan 05 22:04:54 crc kubenswrapper[4910]: I0105 22:04:54.256939 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" Jan 05 22:04:54 crc kubenswrapper[4910]: I0105 22:04:54.278797 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" podStartSLOduration=2.348691911 podStartE2EDuration="12.278777684s" podCreationTimestamp="2026-01-05 22:04:42 +0000 UTC" firstStartedPulling="2026-01-05 22:04:43.353083907 +0000 UTC m=+814.930581577" lastFinishedPulling="2026-01-05 22:04:53.28316968 +0000 UTC m=+824.860667350" observedRunningTime="2026-01-05 22:04:54.276264711 +0000 UTC m=+825.853762371" watchObservedRunningTime="2026-01-05 22:04:54.278777684 +0000 UTC m=+825.856275354" Jan 05 22:04:54 crc kubenswrapper[4910]: I0105 22:04:54.728189 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0530f826-c983-428a-9263-ce4c30dc8185" path="/var/lib/kubelet/pods/0530f826-c983-428a-9263-ce4c30dc8185/volumes" Jan 
05 22:05:03 crc kubenswrapper[4910]: I0105 22:05:03.094079 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-b5f859b96-ztvsm" Jan 05 22:05:10 crc kubenswrapper[4910]: I0105 22:05:10.952918 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:05:10 crc kubenswrapper[4910]: I0105 22:05:10.953589 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:05:10 crc kubenswrapper[4910]: I0105 22:05:10.953646 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 22:05:10 crc kubenswrapper[4910]: I0105 22:05:10.954381 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9e520e28b4c82f9c661ef0957d57afd6c58639ff887c3906d5a2d181968d14b2"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 22:05:10 crc kubenswrapper[4910]: I0105 22:05:10.954444 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://9e520e28b4c82f9c661ef0957d57afd6c58639ff887c3906d5a2d181968d14b2" gracePeriod=600 Jan 05 22:05:11 crc kubenswrapper[4910]: I0105 22:05:11.378838 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="9e520e28b4c82f9c661ef0957d57afd6c58639ff887c3906d5a2d181968d14b2" exitCode=0 Jan 05 22:05:11 crc kubenswrapper[4910]: I0105 22:05:11.378909 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"9e520e28b4c82f9c661ef0957d57afd6c58639ff887c3906d5a2d181968d14b2"} Jan 05 22:05:11 crc kubenswrapper[4910]: I0105 22:05:11.379417 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"a3fde00ac3c0f56cd1cc5b71d4cb8772dfa8207e8240fa1964337d79bc9075bf"} Jan 05 22:05:11 crc kubenswrapper[4910]: I0105 22:05:11.379495 4910 scope.go:117] "RemoveContainer" containerID="24ad24a0bc4cca661f52af59417069858c5167c646d199a5c1c243653f4dbcbf" Jan 05 22:05:22 crc kubenswrapper[4910]: I0105 22:05:22.571293 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-74d48df479-fzsxf" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.356753 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-dhdgh"] Jan 05 22:05:23 crc kubenswrapper[4910]: E0105 22:05:23.357602 4910 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="0530f826-c983-428a-9263-ce4c30dc8185" containerName="registry-server" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.357630 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0530f826-c983-428a-9263-ce4c30dc8185" containerName="registry-server" Jan 05 22:05:23 crc kubenswrapper[4910]: E0105 22:05:23.357671 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0530f826-c983-428a-9263-ce4c30dc8185" containerName="extract-utilities" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.357684 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0530f826-c983-428a-9263-ce4c30dc8185" containerName="extract-utilities" Jan 05 22:05:23 crc kubenswrapper[4910]: E0105 22:05:23.357703 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0530f826-c983-428a-9263-ce4c30dc8185" containerName="extract-content" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.357714 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0530f826-c983-428a-9263-ce4c30dc8185" containerName="extract-content" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.357888 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="0530f826-c983-428a-9263-ce4c30dc8185" containerName="registry-server" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.361727 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-dhdgh" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.364648 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn"] Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.365415 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.367634 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.368066 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.368290 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-ldxfs" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.369230 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.378275 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn"] Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.461075 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-msrzn"] Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.462077 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-msrzn" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.464232 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.464742 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.464902 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-6kxdv" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.466019 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.472476 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w79nc\" (UniqueName: \"kubernetes.io/projected/73796739-d310-4f62-96b9-1634f13d77ae-kube-api-access-w79nc\") pod \"frr-k8s-webhook-server-7784b6fcf-n8npn\" (UID: \"73796739-d310-4f62-96b9-1634f13d77ae\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.472579 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/73796739-d310-4f62-96b9-1634f13d77ae-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-n8npn\" (UID: \"73796739-d310-4f62-96b9-1634f13d77ae\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.472661 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/f5a1ce03-3e27-472c-9e32-20c967308ac8-frr-startup\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.472714 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/f5a1ce03-3e27-472c-9e32-20c967308ac8-reloader\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.472739 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/f5a1ce03-3e27-472c-9e32-20c967308ac8-metrics\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.472791 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/f5a1ce03-3e27-472c-9e32-20c967308ac8-frr-sockets\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.472822 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4h45\" (UniqueName: \"kubernetes.io/projected/f5a1ce03-3e27-472c-9e32-20c967308ac8-kube-api-access-g4h45\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.472859 
4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/f5a1ce03-3e27-472c-9e32-20c967308ac8-frr-conf\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.472885 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f5a1ce03-3e27-472c-9e32-20c967308ac8-metrics-certs\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.487763 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-5bddd4b946-ztwkd"] Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.489184 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-5bddd4b946-ztwkd" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.492596 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.506975 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5bddd4b946-ztwkd"] Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.574398 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/f5a1ce03-3e27-472c-9e32-20c967308ac8-reloader\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.574473 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/62d69338-9eb3-4401-95af-3dcaf1ce48d3-cert\") pod \"controller-5bddd4b946-ztwkd\" (UID: \"62d69338-9eb3-4401-95af-3dcaf1ce48d3\") " pod="metallb-system/controller-5bddd4b946-ztwkd" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.574514 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/f5a1ce03-3e27-472c-9e32-20c967308ac8-metrics\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.574639 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/f5a1ce03-3e27-472c-9e32-20c967308ac8-frr-sockets\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.574718 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4h45\" (UniqueName: \"kubernetes.io/projected/f5a1ce03-3e27-472c-9e32-20c967308ac8-kube-api-access-g4h45\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh" Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.574739 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/f5a1ce03-3e27-472c-9e32-20c967308ac8-frr-conf\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " 
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.574772 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8542l\" (UniqueName: \"kubernetes.io/projected/62d69338-9eb3-4401-95af-3dcaf1ce48d3-kube-api-access-8542l\") pod \"controller-5bddd4b946-ztwkd\" (UID: \"62d69338-9eb3-4401-95af-3dcaf1ce48d3\") " pod="metallb-system/controller-5bddd4b946-ztwkd"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.574817 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/a8265d43-4c9a-499d-9fb7-84292f113454-metallb-excludel2\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.574835 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f5a1ce03-3e27-472c-9e32-20c967308ac8-metrics-certs\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.574868 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w79nc\" (UniqueName: \"kubernetes.io/projected/73796739-d310-4f62-96b9-1634f13d77ae-kube-api-access-w79nc\") pod \"frr-k8s-webhook-server-7784b6fcf-n8npn\" (UID: \"73796739-d310-4f62-96b9-1634f13d77ae\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.574900 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a8265d43-4c9a-499d-9fb7-84292f113454-memberlist\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.574947 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/73796739-d310-4f62-96b9-1634f13d77ae-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-n8npn\" (UID: \"73796739-d310-4f62-96b9-1634f13d77ae\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.574957 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/f5a1ce03-3e27-472c-9e32-20c967308ac8-metrics\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.575234 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/f5a1ce03-3e27-472c-9e32-20c967308ac8-frr-sockets\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.575550 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/f5a1ce03-3e27-472c-9e32-20c967308ac8-reloader\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.576058 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/f5a1ce03-3e27-472c-9e32-20c967308ac8-frr-conf\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.577175 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a8265d43-4c9a-499d-9fb7-84292f113454-metrics-certs\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.577273 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/62d69338-9eb3-4401-95af-3dcaf1ce48d3-metrics-certs\") pod \"controller-5bddd4b946-ztwkd\" (UID: \"62d69338-9eb3-4401-95af-3dcaf1ce48d3\") " pod="metallb-system/controller-5bddd4b946-ztwkd"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.577398 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/f5a1ce03-3e27-472c-9e32-20c967308ac8-frr-startup\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.577441 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfp6q\" (UniqueName: \"kubernetes.io/projected/a8265d43-4c9a-499d-9fb7-84292f113454-kube-api-access-tfp6q\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.578432 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/f5a1ce03-3e27-472c-9e32-20c967308ac8-frr-startup\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.584206 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/73796739-d310-4f62-96b9-1634f13d77ae-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-n8npn\" (UID: \"73796739-d310-4f62-96b9-1634f13d77ae\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.584359 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f5a1ce03-3e27-472c-9e32-20c967308ac8-metrics-certs\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.595763 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4h45\" (UniqueName: \"kubernetes.io/projected/f5a1ce03-3e27-472c-9e32-20c967308ac8-kube-api-access-g4h45\") pod \"frr-k8s-dhdgh\" (UID: \"f5a1ce03-3e27-472c-9e32-20c967308ac8\") " pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.597475 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w79nc\" (UniqueName: \"kubernetes.io/projected/73796739-d310-4f62-96b9-1634f13d77ae-kube-api-access-w79nc\") pod \"frr-k8s-webhook-server-7784b6fcf-n8npn\" (UID: \"73796739-d310-4f62-96b9-1634f13d77ae\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.679346 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a8265d43-4c9a-499d-9fb7-84292f113454-memberlist\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.679443 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a8265d43-4c9a-499d-9fb7-84292f113454-metrics-certs\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.679469 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/62d69338-9eb3-4401-95af-3dcaf1ce48d3-metrics-certs\") pod \"controller-5bddd4b946-ztwkd\" (UID: \"62d69338-9eb3-4401-95af-3dcaf1ce48d3\") " pod="metallb-system/controller-5bddd4b946-ztwkd"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.679525 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfp6q\" (UniqueName: \"kubernetes.io/projected/a8265d43-4c9a-499d-9fb7-84292f113454-kube-api-access-tfp6q\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.679572 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/62d69338-9eb3-4401-95af-3dcaf1ce48d3-cert\") pod \"controller-5bddd4b946-ztwkd\" (UID: \"62d69338-9eb3-4401-95af-3dcaf1ce48d3\") " pod="metallb-system/controller-5bddd4b946-ztwkd"
Jan 05 22:05:23 crc kubenswrapper[4910]: E0105 22:05:23.679595 4910 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.679629 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8542l\" (UniqueName: \"kubernetes.io/projected/62d69338-9eb3-4401-95af-3dcaf1ce48d3-kube-api-access-8542l\") pod \"controller-5bddd4b946-ztwkd\" (UID: \"62d69338-9eb3-4401-95af-3dcaf1ce48d3\") " pod="metallb-system/controller-5bddd4b946-ztwkd"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.679661 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/a8265d43-4c9a-499d-9fb7-84292f113454-metallb-excludel2\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:23 crc kubenswrapper[4910]: E0105 22:05:23.679704 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a8265d43-4c9a-499d-9fb7-84292f113454-memberlist podName:a8265d43-4c9a-499d-9fb7-84292f113454 nodeName:}" failed. No retries permitted until 2026-01-05 22:05:24.179678731 +0000 UTC m=+855.757176491 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/a8265d43-4c9a-499d-9fb7-84292f113454-memberlist") pod "speaker-msrzn" (UID: "a8265d43-4c9a-499d-9fb7-84292f113454") : secret "metallb-memberlist" not found
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.680902 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/a8265d43-4c9a-499d-9fb7-84292f113454-metallb-excludel2\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.683509 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.683745 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a8265d43-4c9a-499d-9fb7-84292f113454-metrics-certs\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.683759 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/62d69338-9eb3-4401-95af-3dcaf1ce48d3-metrics-certs\") pod \"controller-5bddd4b946-ztwkd\" (UID: \"62d69338-9eb3-4401-95af-3dcaf1ce48d3\") " pod="metallb-system/controller-5bddd4b946-ztwkd"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.693395 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/62d69338-9eb3-4401-95af-3dcaf1ce48d3-cert\") pod \"controller-5bddd4b946-ztwkd\" (UID: \"62d69338-9eb3-4401-95af-3dcaf1ce48d3\") " pod="metallb-system/controller-5bddd4b946-ztwkd"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.698164 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8542l\" (UniqueName: \"kubernetes.io/projected/62d69338-9eb3-4401-95af-3dcaf1ce48d3-kube-api-access-8542l\") pod \"controller-5bddd4b946-ztwkd\" (UID: \"62d69338-9eb3-4401-95af-3dcaf1ce48d3\") " pod="metallb-system/controller-5bddd4b946-ztwkd"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.701176 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfp6q\" (UniqueName: \"kubernetes.io/projected/a8265d43-4c9a-499d-9fb7-84292f113454-kube-api-access-tfp6q\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.706664 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.721160 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn"
Jan 05 22:05:23 crc kubenswrapper[4910]: I0105 22:05:23.804995 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-5bddd4b946-ztwkd"
Jan 05 22:05:24 crc kubenswrapper[4910]: W0105 22:05:24.079721 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod62d69338_9eb3_4401_95af_3dcaf1ce48d3.slice/crio-0d29c6937c2434b553e254491cf5158138b5019efbe257449bad631b95ede772 WatchSource:0}: Error finding container 0d29c6937c2434b553e254491cf5158138b5019efbe257449bad631b95ede772: Status 404 returned error can't find the container with id 0d29c6937c2434b553e254491cf5158138b5019efbe257449bad631b95ede772
Jan 05 22:05:24 crc kubenswrapper[4910]: I0105 22:05:24.081313 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5bddd4b946-ztwkd"]
Jan 05 22:05:24 crc kubenswrapper[4910]: I0105 22:05:24.160586 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn"]
Jan 05 22:05:24 crc kubenswrapper[4910]: I0105 22:05:24.189925 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a8265d43-4c9a-499d-9fb7-84292f113454-memberlist\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:24 crc kubenswrapper[4910]: E0105 22:05:24.190157 4910 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Jan 05 22:05:24 crc kubenswrapper[4910]: E0105 22:05:24.190265 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a8265d43-4c9a-499d-9fb7-84292f113454-memberlist podName:a8265d43-4c9a-499d-9fb7-84292f113454 nodeName:}" failed. No retries permitted until 2026-01-05 22:05:25.190240093 +0000 UTC m=+856.767737763 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/a8265d43-4c9a-499d-9fb7-84292f113454-memberlist") pod "speaker-msrzn" (UID: "a8265d43-4c9a-499d-9fb7-84292f113454") : secret "metallb-memberlist" not found
Jan 05 22:05:24 crc kubenswrapper[4910]: I0105 22:05:24.472082 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-ztwkd" event={"ID":"62d69338-9eb3-4401-95af-3dcaf1ce48d3","Type":"ContainerStarted","Data":"78a2b848ab310c58a57a76742c03a814123f79c5eaa3e8406b8e6339751d0330"}
Jan 05 22:05:24 crc kubenswrapper[4910]: I0105 22:05:24.472162 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-ztwkd" event={"ID":"62d69338-9eb3-4401-95af-3dcaf1ce48d3","Type":"ContainerStarted","Data":"0d29c6937c2434b553e254491cf5158138b5019efbe257449bad631b95ede772"}
Jan 05 22:05:24 crc kubenswrapper[4910]: I0105 22:05:24.473650 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn" event={"ID":"73796739-d310-4f62-96b9-1634f13d77ae","Type":"ContainerStarted","Data":"e7ad5c04fcdad153b270af15483b47dc124f4ace21d5d4961078a05e59e68aa1"}
Jan 05 22:05:24 crc kubenswrapper[4910]: I0105 22:05:24.479980 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dhdgh" event={"ID":"f5a1ce03-3e27-472c-9e32-20c967308ac8","Type":"ContainerStarted","Data":"4bae77fc84d93b43306d19163767245dc923ad946267a6759604fb599c0b8460"}
Jan 05 22:05:25 crc kubenswrapper[4910]: I0105 22:05:25.205024 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a8265d43-4c9a-499d-9fb7-84292f113454-memberlist\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:25 crc kubenswrapper[4910]: I0105 22:05:25.214500 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a8265d43-4c9a-499d-9fb7-84292f113454-memberlist\") pod \"speaker-msrzn\" (UID: \"a8265d43-4c9a-499d-9fb7-84292f113454\") " pod="metallb-system/speaker-msrzn"
Jan 05 22:05:25 crc kubenswrapper[4910]: I0105 22:05:25.278060 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-msrzn"
Jan 05 22:05:25 crc kubenswrapper[4910]: I0105 22:05:25.491256 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-msrzn" event={"ID":"a8265d43-4c9a-499d-9fb7-84292f113454","Type":"ContainerStarted","Data":"3dc98586165bcd5f67af2a762f38377b4609c23aea36528d2b2175da687446fd"}
Jan 05 22:05:25 crc kubenswrapper[4910]: I0105 22:05:25.497733 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-ztwkd" event={"ID":"62d69338-9eb3-4401-95af-3dcaf1ce48d3","Type":"ContainerStarted","Data":"f0135da2964fee0cce186dce79df9d288477bcba584ac643a9d3e581da0cc2fb"}
Jan 05 22:05:25 crc kubenswrapper[4910]: I0105 22:05:25.498585 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-5bddd4b946-ztwkd"
Jan 05 22:05:25 crc kubenswrapper[4910]: I0105 22:05:25.524091 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-5bddd4b946-ztwkd" podStartSLOduration=2.524076995 podStartE2EDuration="2.524076995s" podCreationTimestamp="2026-01-05 22:05:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:05:25.521338447 +0000 UTC m=+857.098836117" watchObservedRunningTime="2026-01-05 22:05:25.524076995 +0000 UTC m=+857.101574665"
Jan 05 22:05:26 crc kubenswrapper[4910]: I0105 22:05:26.508834 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-msrzn" event={"ID":"a8265d43-4c9a-499d-9fb7-84292f113454","Type":"ContainerStarted","Data":"9e7eb7e075a22cd08f9aeec592404981bfe6f17fabdd5ea27421aba7b033ee31"}
Jan 05 22:05:26 crc kubenswrapper[4910]: I0105 22:05:26.509339 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-msrzn" event={"ID":"a8265d43-4c9a-499d-9fb7-84292f113454","Type":"ContainerStarted","Data":"0f2b9d3ab55fb36c04e55cd948700bc15e8fe15b256fdab11e1fca51343d4a56"}
Jan 05 22:05:26 crc kubenswrapper[4910]: I0105 22:05:26.509364 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-msrzn"
Jan 05 22:05:26 crc kubenswrapper[4910]: I0105 22:05:26.531193 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-msrzn" podStartSLOduration=3.53117195 podStartE2EDuration="3.53117195s" podCreationTimestamp="2026-01-05 22:05:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:05:26.528553506 +0000 UTC m=+858.106051176" watchObservedRunningTime="2026-01-05 22:05:26.53117195 +0000 UTC m=+858.108669620"
Jan 05 22:05:33 crc kubenswrapper[4910]: I0105 22:05:33.559327 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn" event={"ID":"73796739-d310-4f62-96b9-1634f13d77ae","Type":"ContainerStarted","Data":"c905deacb03e80aaf0b4da7ee792e5accbdb13d8cd0c82c53be40160cc5f3091"}
Jan 05 22:05:33 crc kubenswrapper[4910]: I0105 22:05:33.560167 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn"
Jan 05 22:05:33 crc kubenswrapper[4910]: I0105 22:05:33.562771 4910 generic.go:334] "Generic (PLEG): container finished" podID="f5a1ce03-3e27-472c-9e32-20c967308ac8" containerID="b896b3c435af896ea135cae7f8105e1ef483b25b665d652a007907de2d5d0184" exitCode=0
Jan 05 22:05:33 crc kubenswrapper[4910]: I0105 22:05:33.562868 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dhdgh" event={"ID":"f5a1ce03-3e27-472c-9e32-20c967308ac8","Type":"ContainerDied","Data":"b896b3c435af896ea135cae7f8105e1ef483b25b665d652a007907de2d5d0184"}
Jan 05 22:05:33 crc kubenswrapper[4910]: I0105 22:05:33.587488 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn" podStartSLOduration=2.001507077 podStartE2EDuration="10.587450672s" podCreationTimestamp="2026-01-05 22:05:23 +0000 UTC" firstStartedPulling="2026-01-05 22:05:24.178853101 +0000 UTC m=+855.756350771" lastFinishedPulling="2026-01-05 22:05:32.764796696 +0000 UTC m=+864.342294366" observedRunningTime="2026-01-05 22:05:33.578368758 +0000 UTC m=+865.155866428" watchObservedRunningTime="2026-01-05 22:05:33.587450672 +0000 UTC m=+865.164948382"
Jan 05 22:05:34 crc kubenswrapper[4910]: I0105 22:05:34.570874 4910 generic.go:334] "Generic (PLEG): container finished" podID="f5a1ce03-3e27-472c-9e32-20c967308ac8" containerID="1f0bd454c79a5b05484f571d554ffe9862c1817c0ca8e1603cfd200b80d3bff4" exitCode=0
Jan 05 22:05:34 crc kubenswrapper[4910]: I0105 22:05:34.571608 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dhdgh" event={"ID":"f5a1ce03-3e27-472c-9e32-20c967308ac8","Type":"ContainerDied","Data":"1f0bd454c79a5b05484f571d554ffe9862c1817c0ca8e1603cfd200b80d3bff4"}
Jan 05 22:05:35 crc kubenswrapper[4910]: I0105 22:05:35.282311 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-msrzn"
Jan 05 22:05:35 crc kubenswrapper[4910]: I0105 22:05:35.579620 4910 generic.go:334] "Generic (PLEG): container finished" podID="f5a1ce03-3e27-472c-9e32-20c967308ac8" containerID="409ab3aa3842321f1725569ccb02ffb1576b32a70a54d145edac30b26fe92776" exitCode=0
Jan 05 22:05:35 crc kubenswrapper[4910]: I0105 22:05:35.579687 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dhdgh" event={"ID":"f5a1ce03-3e27-472c-9e32-20c967308ac8","Type":"ContainerDied","Data":"409ab3aa3842321f1725569ccb02ffb1576b32a70a54d145edac30b26fe92776"}
Jan 05 22:05:36 crc kubenswrapper[4910]: I0105 22:05:36.603132 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dhdgh" event={"ID":"f5a1ce03-3e27-472c-9e32-20c967308ac8","Type":"ContainerStarted","Data":"31724922cd36f8b3fc891e089f7d4818a91cccca0da576c2bd6d8b28ca85ef01"}
Jan 05 22:05:36 crc kubenswrapper[4910]: I0105 22:05:36.603536 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dhdgh" event={"ID":"f5a1ce03-3e27-472c-9e32-20c967308ac8","Type":"ContainerStarted","Data":"47d1d9cb1e3221a094a9ab054fec5885e74c91528e244322a6f06bbddfef82cf"}
Jan 05 22:05:36 crc kubenswrapper[4910]: I0105 22:05:36.603553 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dhdgh" event={"ID":"f5a1ce03-3e27-472c-9e32-20c967308ac8","Type":"ContainerStarted","Data":"0e3b2d702bb386b0f75e4944d0c7e95ba9ddc6093983e5f604b8268d23a4bbc3"}
Jan 05 22:05:36 crc kubenswrapper[4910]: I0105 22:05:36.603564 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dhdgh" event={"ID":"f5a1ce03-3e27-472c-9e32-20c967308ac8","Type":"ContainerStarted","Data":"51ce9b518d47a653384cf343df4a65e18b052eefb76bfad0bd4122a7ca80eadd"}
Jan 05 22:05:36 crc kubenswrapper[4910]: I0105 22:05:36.603575 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dhdgh" event={"ID":"f5a1ce03-3e27-472c-9e32-20c967308ac8","Type":"ContainerStarted","Data":"3bcf2fc47a4cecca541e562a6de3c3079f37b8eb222b9cd07309f439cb40ae97"}
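The pod_startup_latency_tracker entry for frr-k8s-webhook-server above reports two durations: podStartE2EDuration is the wall-clock time from pod creation to observed running (10.587450672s), while podStartSLOduration subtracts the image-pull window (22:05:24.178853101 to 22:05:32.764796696, about 8.586s), leaving 2.001507077s. A small check of that arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

// Reproduce the pod_startup_latency_tracker arithmetic from the log:
// SLO duration = E2E duration minus the time spent pulling images.
func main() {
	day := time.Date(2026, 1, 5, 0, 0, 0, 0, time.UTC)
	at := func(h, m, s, ns int) time.Time {
		return day.Add(time.Duration(h)*time.Hour +
			time.Duration(m)*time.Minute +
			time.Duration(s)*time.Second +
			time.Duration(ns)*time.Nanosecond)
	}
	firstStartedPulling := at(22, 5, 24, 178853101)
	lastFinishedPulling := at(22, 5, 32, 764796696)
	e2e := 10587450672 * time.Nanosecond // podStartE2EDuration=10.587450672s

	pull := lastFinishedPulling.Sub(firstStartedPulling)
	fmt.Println("pull:", pull)     // 8.585943595s
	fmt.Println("slo: ", e2e-pull) // 2.001507077s, matching podStartSLOduration
}
```

For the controller and speaker pods a few entries earlier, both pull timestamps are the zero time, so SLO and E2E durations coincide.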
Jan 05 22:05:36 crc kubenswrapper[4910]: I0105 22:05:36.883864 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"]
Jan 05 22:05:36 crc kubenswrapper[4910]: I0105 22:05:36.885055 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"
Jan 05 22:05:36 crc kubenswrapper[4910]: I0105 22:05:36.887872 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Jan 05 22:05:36 crc kubenswrapper[4910]: I0105 22:05:36.965987 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"]
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.053970 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b\" (UID: \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.054032 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b\" (UID: \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.054071 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vgcz\" (UniqueName: \"kubernetes.io/projected/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-kube-api-access-7vgcz\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b\" (UID: \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.155098 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b\" (UID: \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.155173 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b\" (UID: \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.155199 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vgcz\" (UniqueName: \"kubernetes.io/projected/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-kube-api-access-7vgcz\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b\" (UID: \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.155946 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b\" (UID: \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.156176 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b\" (UID: \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.178467 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vgcz\" (UniqueName: \"kubernetes.io/projected/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-kube-api-access-7vgcz\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b\" (UID: \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.239750 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.577099 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"]
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.618647 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b" event={"ID":"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b","Type":"ContainerStarted","Data":"dd03b27bc4a010afdaf888f9b2d05694e3ad7add67288b9fee196f4e5b23fc8a"}
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.624517 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dhdgh" event={"ID":"f5a1ce03-3e27-472c-9e32-20c967308ac8","Type":"ContainerStarted","Data":"9095741f28e40a80fcd7dd9a25c1bea407e408a13601eecb0d7385268b2e6e2a"}
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.626398 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:37 crc kubenswrapper[4910]: I0105 22:05:37.653670 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-dhdgh" podStartSLOduration=6.269856461 podStartE2EDuration="14.653647459s" podCreationTimestamp="2026-01-05 22:05:23 +0000 UTC" firstStartedPulling="2026-01-05 22:05:24.406652872 +0000 UTC m=+855.984150552" lastFinishedPulling="2026-01-05 22:05:32.79044388 +0000 UTC m=+864.367941550" observedRunningTime="2026-01-05 22:05:37.648914152 +0000 UTC m=+869.226411832" watchObservedRunningTime="2026-01-05 22:05:37.653647459 +0000 UTC m=+869.231145129"
Jan 05 22:05:38 crc kubenswrapper[4910]: I0105 22:05:38.632326 4910 generic.go:334] "Generic (PLEG): container finished" podID="6a5f9668-f09e-4e0e-a0df-82f08d28bb9b" containerID="4b96bb1490300056e81b20eb55a4ef951e1f320373f52342c504c2591f730a9c" exitCode=0
Jan 05 22:05:38 crc kubenswrapper[4910]: I0105 22:05:38.632429 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b" event={"ID":"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b","Type":"ContainerDied","Data":"4b96bb1490300056e81b20eb55a4ef951e1f320373f52342c504c2591f730a9c"}
Jan 05 22:05:38 crc kubenswrapper[4910]: I0105 22:05:38.707189 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:38 crc kubenswrapper[4910]: I0105 22:05:38.767358 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:43 crc kubenswrapper[4910]: I0105 22:05:43.669946 4910 generic.go:334] "Generic (PLEG): container finished" podID="6a5f9668-f09e-4e0e-a0df-82f08d28bb9b" containerID="f151ac6dc8c490bafa1a53b995f12fb9fe7f0b2318d762dfe0af1f4505e39758" exitCode=0
Jan 05 22:05:43 crc kubenswrapper[4910]: I0105 22:05:43.670069 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b" event={"ID":"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b","Type":"ContainerDied","Data":"f151ac6dc8c490bafa1a53b995f12fb9fe7f0b2318d762dfe0af1f4505e39758"}
Jan 05 22:05:43 crc kubenswrapper[4910]: I0105 22:05:43.732595 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-n8npn"
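The generic.go:334 "container finished" lines come from the kubelet's pod lifecycle event generator (PLEG), which relists container state and turns observed transitions into the ContainerStarted/ContainerDied events threaded through this log; here they mark the frr-k8s init containers each exiting with code 0 before the long-running containers start. A toy relist, with made-up types, showing the shape of that diff:

```go
package main

import "fmt"

// Toy model of a PLEG relist: diff the previous snapshot of container
// states against the current one and emit a lifecycle event per change.
// (The real PLEG also tracks sandboxes, timestamps, and pod association.)
type state string

const (
	running state = "running"
	exited  state = "exited"
)

func relist(old, cur map[string]state) []string {
	var events []string
	for id, s := range cur {
		if old[id] == s {
			continue
		}
		switch s {
		case running:
			events = append(events, "ContainerStarted "+id)
		case exited:
			events = append(events, "ContainerDied "+id)
		}
	}
	return events
}

func main() {
	old := map[string]state{"b896b3c435af": running}
	cur := map[string]state{"b896b3c435af": exited, "31724922cd36": running}
	for _, e := range relist(old, cur) { // emission order is not guaranteed
		fmt.Println(e)
	}
}
```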
Jan 05 22:05:43 crc kubenswrapper[4910]: I0105 22:05:43.812138 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-5bddd4b946-ztwkd"
Jan 05 22:05:44 crc kubenswrapper[4910]: I0105 22:05:44.679897 4910 generic.go:334] "Generic (PLEG): container finished" podID="6a5f9668-f09e-4e0e-a0df-82f08d28bb9b" containerID="96ba858145d5102ce84d3a7de567d5df508801f5273c91b5ca8bdb3bcc02e682" exitCode=0
Jan 05 22:05:44 crc kubenswrapper[4910]: I0105 22:05:44.679956 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b" event={"ID":"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b","Type":"ContainerDied","Data":"96ba858145d5102ce84d3a7de567d5df508801f5273c91b5ca8bdb3bcc02e682"}
Jan 05 22:05:45 crc kubenswrapper[4910]: I0105 22:05:45.974579 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"
Jan 05 22:05:46 crc kubenswrapper[4910]: I0105 22:05:46.113442 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vgcz\" (UniqueName: \"kubernetes.io/projected/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-kube-api-access-7vgcz\") pod \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\" (UID: \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\") "
Jan 05 22:05:46 crc kubenswrapper[4910]: I0105 22:05:46.113535 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-bundle\") pod \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\" (UID: \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\") "
Jan 05 22:05:46 crc kubenswrapper[4910]: I0105 22:05:46.113701 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-util\") pod \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\" (UID: \"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b\") "
Jan 05 22:05:46 crc kubenswrapper[4910]: I0105 22:05:46.114904 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-bundle" (OuterVolumeSpecName: "bundle") pod "6a5f9668-f09e-4e0e-a0df-82f08d28bb9b" (UID: "6a5f9668-f09e-4e0e-a0df-82f08d28bb9b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:05:46 crc kubenswrapper[4910]: I0105 22:05:46.119528 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-kube-api-access-7vgcz" (OuterVolumeSpecName: "kube-api-access-7vgcz") pod "6a5f9668-f09e-4e0e-a0df-82f08d28bb9b" (UID: "6a5f9668-f09e-4e0e-a0df-82f08d28bb9b"). InnerVolumeSpecName "kube-api-access-7vgcz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:05:46 crc kubenswrapper[4910]: I0105 22:05:46.123698 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-util" (OuterVolumeSpecName: "util") pod "6a5f9668-f09e-4e0e-a0df-82f08d28bb9b" (UID: "6a5f9668-f09e-4e0e-a0df-82f08d28bb9b"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:05:46 crc kubenswrapper[4910]: I0105 22:05:46.215097 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vgcz\" (UniqueName: \"kubernetes.io/projected/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-kube-api-access-7vgcz\") on node \"crc\" DevicePath \"\""
Jan 05 22:05:46 crc kubenswrapper[4910]: I0105 22:05:46.215139 4910 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:05:46 crc kubenswrapper[4910]: I0105 22:05:46.215151 4910 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a5f9668-f09e-4e0e-a0df-82f08d28bb9b-util\") on node \"crc\" DevicePath \"\""
Jan 05 22:05:46 crc kubenswrapper[4910]: I0105 22:05:46.697624 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b" event={"ID":"6a5f9668-f09e-4e0e-a0df-82f08d28bb9b","Type":"ContainerDied","Data":"dd03b27bc4a010afdaf888f9b2d05694e3ad7add67288b9fee196f4e5b23fc8a"}
Jan 05 22:05:46 crc kubenswrapper[4910]: I0105 22:05:46.697674 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd03b27bc4a010afdaf888f9b2d05694e3ad7add67288b9fee196f4e5b23fc8a"
Jan 05 22:05:46 crc kubenswrapper[4910]: I0105 22:05:46.697725 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.416695 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb"]
Jan 05 22:05:51 crc kubenswrapper[4910]: E0105 22:05:50.417445 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a5f9668-f09e-4e0e-a0df-82f08d28bb9b" containerName="util"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.417456 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a5f9668-f09e-4e0e-a0df-82f08d28bb9b" containerName="util"
Jan 05 22:05:51 crc kubenswrapper[4910]: E0105 22:05:50.417467 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a5f9668-f09e-4e0e-a0df-82f08d28bb9b" containerName="pull"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.417473 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a5f9668-f09e-4e0e-a0df-82f08d28bb9b" containerName="pull"
Jan 05 22:05:51 crc kubenswrapper[4910]: E0105 22:05:50.417487 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a5f9668-f09e-4e0e-a0df-82f08d28bb9b" containerName="extract"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.417494 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a5f9668-f09e-4e0e-a0df-82f08d28bb9b" containerName="extract"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.417593 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a5f9668-f09e-4e0e-a0df-82f08d28bb9b" containerName="extract"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.418013 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb"
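Once the bundle-unpack pod's containers have all exited, cleanup runs in a fixed order: each volume is unmounted (UnmountVolume.TearDown), reported detached from the node, and then the CPU and memory managers drop their per-container state for the vanished util/pull/extract containers (RemoveStaleState, "Deleted CPUSet assignment"). A hypothetical sketch of that ordering:

```go
package main

import "fmt"

// Hypothetical sketch of the pod teardown ordering visible in the log:
// unmount and detach volumes first, then release per-container
// resource-manager bookkeeping for containers that no longer exist.
func teardown(podUID string, volumes, containers []string) {
	for _, v := range volumes {
		fmt.Printf("UnmountVolume.TearDown succeeded for volume %q\n", v)
		fmt.Printf("Volume detached for volume %q\n", v)
	}
	for _, c := range containers {
		// cpu_manager / memory_manager: forget CPUSet and memory
		// assignments recorded for the deleted pod's containers.
		fmt.Printf("RemoveStaleState: removing container podUID=%s containerName=%s\n", podUID, c)
	}
}

func main() {
	teardown("6a5f9668-f09e-4e0e-a0df-82f08d28bb9b",
		[]string{"bundle", "util", "kube-api-access-7vgcz"},
		[]string{"util", "pull", "extract"})
}
```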
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.420330 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.420379 4910 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-pnj6j"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.421246 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.447294 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb"]
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.578605 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a4592683-de7d-40c3-956c-40b18a15d1e4-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-j2vcb\" (UID: \"a4592683-de7d-40c3-956c-40b18a15d1e4\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.578681 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpwpf\" (UniqueName: \"kubernetes.io/projected/a4592683-de7d-40c3-956c-40b18a15d1e4-kube-api-access-lpwpf\") pod \"cert-manager-operator-controller-manager-64cf6dff88-j2vcb\" (UID: \"a4592683-de7d-40c3-956c-40b18a15d1e4\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.680157 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a4592683-de7d-40c3-956c-40b18a15d1e4-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-j2vcb\" (UID: \"a4592683-de7d-40c3-956c-40b18a15d1e4\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.680253 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpwpf\" (UniqueName: \"kubernetes.io/projected/a4592683-de7d-40c3-956c-40b18a15d1e4-kube-api-access-lpwpf\") pod \"cert-manager-operator-controller-manager-64cf6dff88-j2vcb\" (UID: \"a4592683-de7d-40c3-956c-40b18a15d1e4\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.680670 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a4592683-de7d-40c3-956c-40b18a15d1e4-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-j2vcb\" (UID: \"a4592683-de7d-40c3-956c-40b18a15d1e4\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.711613 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpwpf\" (UniqueName: \"kubernetes.io/projected/a4592683-de7d-40c3-956c-40b18a15d1e4-kube-api-access-lpwpf\") pod \"cert-manager-operator-controller-manager-64cf6dff88-j2vcb\" (UID: \"a4592683-de7d-40c3-956c-40b18a15d1e4\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:50.732404 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb"
Jan 05 22:05:51 crc kubenswrapper[4910]: I0105 22:05:51.846266 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb"]
Jan 05 22:05:52 crc kubenswrapper[4910]: I0105 22:05:52.738604 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb" event={"ID":"a4592683-de7d-40c3-956c-40b18a15d1e4","Type":"ContainerStarted","Data":"0124cba7422c8ab89112cc706fdf3186cec7571502dd2f35ad7776cd408d81cc"}
Jan 05 22:05:53 crc kubenswrapper[4910]: I0105 22:05:53.710278 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-dhdgh"
Jan 05 22:05:59 crc kubenswrapper[4910]: I0105 22:05:59.807846 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb" event={"ID":"a4592683-de7d-40c3-956c-40b18a15d1e4","Type":"ContainerStarted","Data":"a9da727df9425416678aa75d330d6b72ffaa71de5a24ebd6971bfa9d1ec8e878"}
Jan 05 22:05:59 crc kubenswrapper[4910]: I0105 22:05:59.837221 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-j2vcb" podStartSLOduration=3.030056294 podStartE2EDuration="9.837204788s" podCreationTimestamp="2026-01-05 22:05:50 +0000 UTC" firstStartedPulling="2026-01-05 22:05:51.855797526 +0000 UTC m=+883.433295196" lastFinishedPulling="2026-01-05 22:05:58.66294601 +0000 UTC m=+890.240443690" observedRunningTime="2026-01-05 22:05:59.833197139 +0000 UTC m=+891.410694809" watchObservedRunningTime="2026-01-05 22:05:59.837204788 +0000 UTC m=+891.414702458"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.108038 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-5rk2d"]
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.109180 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-5rk2d"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.111622 4910 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-pff77"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.112184 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.112912 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.121682 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-5rk2d"]
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.175274 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2af2cd66-2e31-4c27-b216-136a81c22df8-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-5rk2d\" (UID: \"2af2cd66-2e31-4c27-b216-136a81c22df8\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-5rk2d"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.175346 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4gs4\" (UniqueName: \"kubernetes.io/projected/2af2cd66-2e31-4c27-b216-136a81c22df8-kube-api-access-q4gs4\") pod \"cert-manager-webhook-f4fb5df64-5rk2d\" (UID: \"2af2cd66-2e31-4c27-b216-136a81c22df8\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-5rk2d"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.276950 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2af2cd66-2e31-4c27-b216-136a81c22df8-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-5rk2d\" (UID: \"2af2cd66-2e31-4c27-b216-136a81c22df8\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-5rk2d"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.277016 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4gs4\" (UniqueName: \"kubernetes.io/projected/2af2cd66-2e31-4c27-b216-136a81c22df8-kube-api-access-q4gs4\") pod \"cert-manager-webhook-f4fb5df64-5rk2d\" (UID: \"2af2cd66-2e31-4c27-b216-136a81c22df8\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-5rk2d"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.295204 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2af2cd66-2e31-4c27-b216-136a81c22df8-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-5rk2d\" (UID: \"2af2cd66-2e31-4c27-b216-136a81c22df8\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-5rk2d"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.313351 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4gs4\" (UniqueName: \"kubernetes.io/projected/2af2cd66-2e31-4c27-b216-136a81c22df8-kube-api-access-q4gs4\") pod \"cert-manager-webhook-f4fb5df64-5rk2d\" (UID: \"2af2cd66-2e31-4c27-b216-136a81c22df8\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-5rk2d"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.425388 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-5rk2d"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.660911 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b"]
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.662342 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.666704 4910 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-kf6bl"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.681890 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b"]
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.783969 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d78320ee-d759-4e45-9cd8-c0c8f1570ef7-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-dzs8b\" (UID: \"d78320ee-d759-4e45-9cd8-c0c8f1570ef7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.784033 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vstvs\" (UniqueName: \"kubernetes.io/projected/d78320ee-d759-4e45-9cd8-c0c8f1570ef7-kube-api-access-vstvs\") pod \"cert-manager-cainjector-855d9ccff4-dzs8b\" (UID: \"d78320ee-d759-4e45-9cd8-c0c8f1570ef7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.885762 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d78320ee-d759-4e45-9cd8-c0c8f1570ef7-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-dzs8b\" (UID: \"d78320ee-d759-4e45-9cd8-c0c8f1570ef7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.885835 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vstvs\" (UniqueName: \"kubernetes.io/projected/d78320ee-d759-4e45-9cd8-c0c8f1570ef7-kube-api-access-vstvs\") pod \"cert-manager-cainjector-855d9ccff4-dzs8b\" (UID: \"d78320ee-d759-4e45-9cd8-c0c8f1570ef7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.910269 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d78320ee-d759-4e45-9cd8-c0c8f1570ef7-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-dzs8b\" (UID: \"d78320ee-d759-4e45-9cd8-c0c8f1570ef7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.920628 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vstvs\" (UniqueName: \"kubernetes.io/projected/d78320ee-d759-4e45-9cd8-c0c8f1570ef7-kube-api-access-vstvs\") pod \"cert-manager-cainjector-855d9ccff4-dzs8b\" (UID: \"d78320ee-d759-4e45-9cd8-c0c8f1570ef7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b"
Jan 05 22:06:03 crc kubenswrapper[4910]: I0105 22:06:03.986374 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-5rk2d"]
Jan 05 22:06:03 crc kubenswrapper[4910]: W0105 22:06:03.996000 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2af2cd66_2e31_4c27_b216_136a81c22df8.slice/crio-2797c0c99a80389d8d013e37b38cd9063681262672b86fd8ac6cab3c565fcef5 WatchSource:0}: Error finding container 2797c0c99a80389d8d013e37b38cd9063681262672b86fd8ac6cab3c565fcef5: Status 404 returned error can't find the container with id 2797c0c99a80389d8d013e37b38cd9063681262672b86fd8ac6cab3c565fcef5
Jan 05 22:06:04 crc kubenswrapper[4910]: I0105 22:06:04.000540 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b"
Jan 05 22:06:04 crc kubenswrapper[4910]: I0105 22:06:04.222158 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b"]
Jan 05 22:06:04 crc kubenswrapper[4910]: W0105 22:06:04.226200 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd78320ee_d759_4e45_9cd8_c0c8f1570ef7.slice/crio-196381a111564d9e5303834da523a1187a1fe300eb76fcb3f9141ebe0c7c3290 WatchSource:0}: Error finding container 196381a111564d9e5303834da523a1187a1fe300eb76fcb3f9141ebe0c7c3290: Status 404 returned error can't find the container with id 196381a111564d9e5303834da523a1187a1fe300eb76fcb3f9141ebe0c7c3290
Jan 05 22:06:04 crc kubenswrapper[4910]: I0105 22:06:04.856400 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b" event={"ID":"d78320ee-d759-4e45-9cd8-c0c8f1570ef7","Type":"ContainerStarted","Data":"196381a111564d9e5303834da523a1187a1fe300eb76fcb3f9141ebe0c7c3290"}
Jan 05 22:06:04 crc kubenswrapper[4910]: I0105 22:06:04.857854 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-5rk2d" event={"ID":"2af2cd66-2e31-4c27-b216-136a81c22df8","Type":"ContainerStarted","Data":"2797c0c99a80389d8d013e37b38cd9063681262672b86fd8ac6cab3c565fcef5"}
Jan 05 22:06:12 crc kubenswrapper[4910]: I0105 22:06:12.916542 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b" event={"ID":"d78320ee-d759-4e45-9cd8-c0c8f1570ef7","Type":"ContainerStarted","Data":"7ccb1d27dd2b30c83b01375d0479b65fbf62c04eed9d10b85addbbf5ff310f3c"}
Jan 05 22:06:12 crc kubenswrapper[4910]: I0105 22:06:12.918729 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-5rk2d" event={"ID":"2af2cd66-2e31-4c27-b216-136a81c22df8","Type":"ContainerStarted","Data":"848a4bbb01b841597a495d2803833e839bb0b96eb8facd8670184e174a46e683"}
Jan 05 22:06:12 crc kubenswrapper[4910]: I0105 22:06:12.918762 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-5rk2d"
Jan 05 22:06:12 crc kubenswrapper[4910]: I0105 22:06:12.939237 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-dzs8b" podStartSLOduration=2.427814008 podStartE2EDuration="9.93921163s" podCreationTimestamp="2026-01-05 22:06:03 +0000 UTC" firstStartedPulling="2026-01-05 22:06:04.228566823 +0000 UTC m=+895.806064493" lastFinishedPulling="2026-01-05 22:06:11.739964445 +0000 UTC m=+903.317462115" observedRunningTime="2026-01-05 22:06:12.934916294 +0000 UTC m=+904.512413974" watchObservedRunningTime="2026-01-05 22:06:12.93921163 +0000 UTC m=+904.516709300"
Jan 05 22:06:12 crc kubenswrapper[4910]: I0105 22:06:12.961276 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-5rk2d" podStartSLOduration=2.1993399399999998 podStartE2EDuration="9.961252775s" podCreationTimestamp="2026-01-05 22:06:03 +0000 UTC" firstStartedPulling="2026-01-05 22:06:03.998532676 +0000 UTC m=+895.576030346" lastFinishedPulling="2026-01-05 22:06:11.760445521 +0000 UTC m=+903.337943181" observedRunningTime="2026-01-05 22:06:12.955802551 +0000 UTC m=+904.533300231" watchObservedRunningTime="2026-01-05 22:06:12.961252775 +0000 UTC m=+904.538750445"
Jan 05 22:06:18 crc kubenswrapper[4910]: I0105 22:06:18.428304 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-5rk2d"
Jan 05 22:06:22 crc kubenswrapper[4910]: I0105 22:06:22.089513 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-lwkkl"]
Jan 05 22:06:22 crc kubenswrapper[4910]: I0105 22:06:22.091225 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-lwkkl"
Jan 05 22:06:22 crc kubenswrapper[4910]: I0105 22:06:22.093730 4910 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-h9n5m"
Jan 05 22:06:22 crc kubenswrapper[4910]: I0105 22:06:22.103390 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-lwkkl"]
Jan 05 22:06:22 crc kubenswrapper[4910]: I0105 22:06:22.112510 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5z5x2\" (UniqueName: \"kubernetes.io/projected/6991aa21-d1ec-4b50-8675-4876f90b6c9f-kube-api-access-5z5x2\") pod \"cert-manager-86cb77c54b-lwkkl\" (UID: \"6991aa21-d1ec-4b50-8675-4876f90b6c9f\") " pod="cert-manager/cert-manager-86cb77c54b-lwkkl"
Jan 05 22:06:22 crc kubenswrapper[4910]: I0105 22:06:22.112761 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6991aa21-d1ec-4b50-8675-4876f90b6c9f-bound-sa-token\") pod \"cert-manager-86cb77c54b-lwkkl\" (UID: \"6991aa21-d1ec-4b50-8675-4876f90b6c9f\") " pod="cert-manager/cert-manager-86cb77c54b-lwkkl"
Jan 05 22:06:22 crc kubenswrapper[4910]: I0105 22:06:22.213822 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6991aa21-d1ec-4b50-8675-4876f90b6c9f-bound-sa-token\") pod \"cert-manager-86cb77c54b-lwkkl\" (UID: \"6991aa21-d1ec-4b50-8675-4876f90b6c9f\") " pod="cert-manager/cert-manager-86cb77c54b-lwkkl"
Jan 05 22:06:22 crc kubenswrapper[4910]: I0105 22:06:22.213958 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5z5x2\" (UniqueName: \"kubernetes.io/projected/6991aa21-d1ec-4b50-8675-4876f90b6c9f-kube-api-access-5z5x2\") pod \"cert-manager-86cb77c54b-lwkkl\" (UID: \"6991aa21-d1ec-4b50-8675-4876f90b6c9f\") " pod="cert-manager/cert-manager-86cb77c54b-lwkkl"
Jan 05 22:06:22 crc kubenswrapper[4910]: I0105 22:06:22.235371 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6991aa21-d1ec-4b50-8675-4876f90b6c9f-bound-sa-token\") pod \"cert-manager-86cb77c54b-lwkkl\" (UID: \"6991aa21-d1ec-4b50-8675-4876f90b6c9f\") " pod="cert-manager/cert-manager-86cb77c54b-lwkkl"
Jan 05 22:06:22 crc kubenswrapper[4910]: I0105 22:06:22.235538 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5z5x2\" (UniqueName: \"kubernetes.io/projected/6991aa21-d1ec-4b50-8675-4876f90b6c9f-kube-api-access-5z5x2\") pod \"cert-manager-86cb77c54b-lwkkl\" (UID: \"6991aa21-d1ec-4b50-8675-4876f90b6c9f\") " pod="cert-manager/cert-manager-86cb77c54b-lwkkl"
Jan 05 22:06:22 crc kubenswrapper[4910]: I0105 22:06:22.416041 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-lwkkl"
Jan 05 22:06:22 crc kubenswrapper[4910]: I0105 22:06:22.879603 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-lwkkl"]
Jan 05 22:06:22 crc kubenswrapper[4910]: I0105 22:06:22.989264 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-lwkkl" event={"ID":"6991aa21-d1ec-4b50-8675-4876f90b6c9f","Type":"ContainerStarted","Data":"52c0366bc90192dd277fa49d379cdc1e0eba5b67455e86fb1f7401a159769697"}
Jan 05 22:06:25 crc kubenswrapper[4910]: I0105 22:06:25.003847 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-lwkkl" event={"ID":"6991aa21-d1ec-4b50-8675-4876f90b6c9f","Type":"ContainerStarted","Data":"1020c581e930ec0cbeade2f87c90c3cd09b7475a9b8a887b280f732ace4a1971"}
Jan 05 22:06:25 crc kubenswrapper[4910]: I0105 22:06:25.028187 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-lwkkl" podStartSLOduration=3.02815979 podStartE2EDuration="3.02815979s" podCreationTimestamp="2026-01-05 22:06:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:06:25.024010038 +0000 UTC m=+916.601507758" watchObservedRunningTime="2026-01-05 22:06:25.02815979 +0000 UTC m=+916.605657480"
Jan 05 22:06:31 crc kubenswrapper[4910]: I0105 22:06:31.889886 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-lch75"]
Jan 05 22:06:31 crc kubenswrapper[4910]: I0105 22:06:31.893405 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-lch75"
Need to start a new one" pod="openstack-operators/openstack-operator-index-lch75" Jan 05 22:06:31 crc kubenswrapper[4910]: I0105 22:06:31.897024 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-wsmk4" Jan 05 22:06:31 crc kubenswrapper[4910]: I0105 22:06:31.899575 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 05 22:06:31 crc kubenswrapper[4910]: I0105 22:06:31.899640 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 05 22:06:31 crc kubenswrapper[4910]: I0105 22:06:31.919424 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-lch75"] Jan 05 22:06:31 crc kubenswrapper[4910]: I0105 22:06:31.967375 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqb7l\" (UniqueName: \"kubernetes.io/projected/9d903f96-6c0d-45b7-a304-4d0e870fd392-kube-api-access-rqb7l\") pod \"openstack-operator-index-lch75\" (UID: \"9d903f96-6c0d-45b7-a304-4d0e870fd392\") " pod="openstack-operators/openstack-operator-index-lch75" Jan 05 22:06:32 crc kubenswrapper[4910]: I0105 22:06:32.068848 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqb7l\" (UniqueName: \"kubernetes.io/projected/9d903f96-6c0d-45b7-a304-4d0e870fd392-kube-api-access-rqb7l\") pod \"openstack-operator-index-lch75\" (UID: \"9d903f96-6c0d-45b7-a304-4d0e870fd392\") " pod="openstack-operators/openstack-operator-index-lch75" Jan 05 22:06:32 crc kubenswrapper[4910]: I0105 22:06:32.094819 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqb7l\" (UniqueName: \"kubernetes.io/projected/9d903f96-6c0d-45b7-a304-4d0e870fd392-kube-api-access-rqb7l\") pod \"openstack-operator-index-lch75\" (UID: \"9d903f96-6c0d-45b7-a304-4d0e870fd392\") " pod="openstack-operators/openstack-operator-index-lch75" Jan 05 22:06:32 crc kubenswrapper[4910]: I0105 22:06:32.213759 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-lch75" Jan 05 22:06:32 crc kubenswrapper[4910]: I0105 22:06:32.427974 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-lch75"] Jan 05 22:06:33 crc kubenswrapper[4910]: I0105 22:06:33.055798 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lch75" event={"ID":"9d903f96-6c0d-45b7-a304-4d0e870fd392","Type":"ContainerStarted","Data":"92d0e7b7d3047e8d62e702532178be5a877b0b4f91fb76ad147227c4f5a97efa"} Jan 05 22:06:34 crc kubenswrapper[4910]: I0105 22:06:34.064771 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lch75" event={"ID":"9d903f96-6c0d-45b7-a304-4d0e870fd392","Type":"ContainerStarted","Data":"e7b241527e06032bc6e82eb2ea125fc374473d8d9fe9fff3b695780d2442ca37"} Jan 05 22:06:34 crc kubenswrapper[4910]: I0105 22:06:34.084818 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-lch75" podStartSLOduration=1.925806352 podStartE2EDuration="3.084795712s" podCreationTimestamp="2026-01-05 22:06:31 +0000 UTC" firstStartedPulling="2026-01-05 22:06:32.432828935 +0000 UTC m=+924.010326605" lastFinishedPulling="2026-01-05 22:06:33.591818295 +0000 UTC m=+925.169315965" observedRunningTime="2026-01-05 22:06:34.082659029 +0000 UTC m=+925.660156689" watchObservedRunningTime="2026-01-05 22:06:34.084795712 +0000 UTC m=+925.662293382" Jan 05 22:06:35 crc kubenswrapper[4910]: I0105 22:06:35.835776 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-lch75"] Jan 05 22:06:36 crc kubenswrapper[4910]: I0105 22:06:36.080363 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-lch75" podUID="9d903f96-6c0d-45b7-a304-4d0e870fd392" containerName="registry-server" containerID="cri-o://e7b241527e06032bc6e82eb2ea125fc374473d8d9fe9fff3b695780d2442ca37" gracePeriod=2 Jan 05 22:06:36 crc kubenswrapper[4910]: I0105 22:06:36.448280 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-gfvrk"] Jan 05 22:06:36 crc kubenswrapper[4910]: I0105 22:06:36.449713 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-gfvrk" Jan 05 22:06:36 crc kubenswrapper[4910]: I0105 22:06:36.457455 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-gfvrk"] Jan 05 22:06:36 crc kubenswrapper[4910]: I0105 22:06:36.565904 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-lch75" Jan 05 22:06:36 crc kubenswrapper[4910]: I0105 22:06:36.634222 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfswl\" (UniqueName: \"kubernetes.io/projected/1ce59020-e2a4-4ba7-83cc-e080410e62d2-kube-api-access-rfswl\") pod \"openstack-operator-index-gfvrk\" (UID: \"1ce59020-e2a4-4ba7-83cc-e080410e62d2\") " pod="openstack-operators/openstack-operator-index-gfvrk" Jan 05 22:06:36 crc kubenswrapper[4910]: I0105 22:06:36.735473 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqb7l\" (UniqueName: \"kubernetes.io/projected/9d903f96-6c0d-45b7-a304-4d0e870fd392-kube-api-access-rqb7l\") pod \"9d903f96-6c0d-45b7-a304-4d0e870fd392\" (UID: \"9d903f96-6c0d-45b7-a304-4d0e870fd392\") " Jan 05 22:06:36 crc kubenswrapper[4910]: I0105 22:06:36.736188 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfswl\" (UniqueName: \"kubernetes.io/projected/1ce59020-e2a4-4ba7-83cc-e080410e62d2-kube-api-access-rfswl\") pod \"openstack-operator-index-gfvrk\" (UID: \"1ce59020-e2a4-4ba7-83cc-e080410e62d2\") " pod="openstack-operators/openstack-operator-index-gfvrk" Jan 05 22:06:36 crc kubenswrapper[4910]: I0105 22:06:36.742462 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d903f96-6c0d-45b7-a304-4d0e870fd392-kube-api-access-rqb7l" (OuterVolumeSpecName: "kube-api-access-rqb7l") pod "9d903f96-6c0d-45b7-a304-4d0e870fd392" (UID: "9d903f96-6c0d-45b7-a304-4d0e870fd392"). InnerVolumeSpecName "kube-api-access-rqb7l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:06:36 crc kubenswrapper[4910]: I0105 22:06:36.754935 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfswl\" (UniqueName: \"kubernetes.io/projected/1ce59020-e2a4-4ba7-83cc-e080410e62d2-kube-api-access-rfswl\") pod \"openstack-operator-index-gfvrk\" (UID: \"1ce59020-e2a4-4ba7-83cc-e080410e62d2\") " pod="openstack-operators/openstack-operator-index-gfvrk" Jan 05 22:06:36 crc kubenswrapper[4910]: I0105 22:06:36.780429 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-gfvrk" Jan 05 22:06:36 crc kubenswrapper[4910]: I0105 22:06:36.838640 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqb7l\" (UniqueName: \"kubernetes.io/projected/9d903f96-6c0d-45b7-a304-4d0e870fd392-kube-api-access-rqb7l\") on node \"crc\" DevicePath \"\"" Jan 05 22:06:37 crc kubenswrapper[4910]: I0105 22:06:37.093317 4910 generic.go:334] "Generic (PLEG): container finished" podID="9d903f96-6c0d-45b7-a304-4d0e870fd392" containerID="e7b241527e06032bc6e82eb2ea125fc374473d8d9fe9fff3b695780d2442ca37" exitCode=0 Jan 05 22:06:37 crc kubenswrapper[4910]: I0105 22:06:37.093372 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lch75" event={"ID":"9d903f96-6c0d-45b7-a304-4d0e870fd392","Type":"ContainerDied","Data":"e7b241527e06032bc6e82eb2ea125fc374473d8d9fe9fff3b695780d2442ca37"} Jan 05 22:06:37 crc kubenswrapper[4910]: I0105 22:06:37.093381 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-lch75" Jan 05 22:06:37 crc kubenswrapper[4910]: I0105 22:06:37.093415 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lch75" event={"ID":"9d903f96-6c0d-45b7-a304-4d0e870fd392","Type":"ContainerDied","Data":"92d0e7b7d3047e8d62e702532178be5a877b0b4f91fb76ad147227c4f5a97efa"} Jan 05 22:06:37 crc kubenswrapper[4910]: I0105 22:06:37.093452 4910 scope.go:117] "RemoveContainer" containerID="e7b241527e06032bc6e82eb2ea125fc374473d8d9fe9fff3b695780d2442ca37" Jan 05 22:06:37 crc kubenswrapper[4910]: I0105 22:06:37.128658 4910 scope.go:117] "RemoveContainer" containerID="e7b241527e06032bc6e82eb2ea125fc374473d8d9fe9fff3b695780d2442ca37" Jan 05 22:06:37 crc kubenswrapper[4910]: E0105 22:06:37.130498 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7b241527e06032bc6e82eb2ea125fc374473d8d9fe9fff3b695780d2442ca37\": container with ID starting with e7b241527e06032bc6e82eb2ea125fc374473d8d9fe9fff3b695780d2442ca37 not found: ID does not exist" containerID="e7b241527e06032bc6e82eb2ea125fc374473d8d9fe9fff3b695780d2442ca37" Jan 05 22:06:37 crc kubenswrapper[4910]: I0105 22:06:37.130568 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7b241527e06032bc6e82eb2ea125fc374473d8d9fe9fff3b695780d2442ca37"} err="failed to get container status \"e7b241527e06032bc6e82eb2ea125fc374473d8d9fe9fff3b695780d2442ca37\": rpc error: code = NotFound desc = could not find container \"e7b241527e06032bc6e82eb2ea125fc374473d8d9fe9fff3b695780d2442ca37\": container with ID starting with e7b241527e06032bc6e82eb2ea125fc374473d8d9fe9fff3b695780d2442ca37 not found: ID does not exist" Jan 05 22:06:37 crc kubenswrapper[4910]: I0105 22:06:37.139004 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-lch75"] Jan 05 22:06:37 crc kubenswrapper[4910]: I0105 22:06:37.144428 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-lch75"] Jan 05 22:06:37 crc kubenswrapper[4910]: I0105 22:06:37.229377 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-gfvrk"] Jan 05 22:06:37 crc kubenswrapper[4910]: W0105 22:06:37.236004 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1ce59020_e2a4_4ba7_83cc_e080410e62d2.slice/crio-1c8c35281ca6def452d368e8280acfd1b596dae9d48040b9b6644ff10b854c60 WatchSource:0}: Error finding container 1c8c35281ca6def452d368e8280acfd1b596dae9d48040b9b6644ff10b854c60: Status 404 returned error can't find the container with id 1c8c35281ca6def452d368e8280acfd1b596dae9d48040b9b6644ff10b854c60 Jan 05 22:06:38 crc kubenswrapper[4910]: I0105 22:06:38.101048 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-gfvrk" event={"ID":"1ce59020-e2a4-4ba7-83cc-e080410e62d2","Type":"ContainerStarted","Data":"1c8c35281ca6def452d368e8280acfd1b596dae9d48040b9b6644ff10b854c60"} Jan 05 22:06:38 crc kubenswrapper[4910]: I0105 22:06:38.729440 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d903f96-6c0d-45b7-a304-4d0e870fd392" path="/var/lib/kubelet/pods/9d903f96-6c0d-45b7-a304-4d0e870fd392/volumes" Jan 05 22:06:39 crc kubenswrapper[4910]: I0105 22:06:39.111071 4910 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-gfvrk" event={"ID":"1ce59020-e2a4-4ba7-83cc-e080410e62d2","Type":"ContainerStarted","Data":"7f08fa500c75ae4df647602f5b17026bae6fae4c989d620254a352cde752d870"} Jan 05 22:06:39 crc kubenswrapper[4910]: I0105 22:06:39.133629 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-gfvrk" podStartSLOduration=1.630305527 podStartE2EDuration="3.133600779s" podCreationTimestamp="2026-01-05 22:06:36 +0000 UTC" firstStartedPulling="2026-01-05 22:06:37.241492055 +0000 UTC m=+928.818989725" lastFinishedPulling="2026-01-05 22:06:38.744787297 +0000 UTC m=+930.322284977" observedRunningTime="2026-01-05 22:06:39.124965206 +0000 UTC m=+930.702462876" watchObservedRunningTime="2026-01-05 22:06:39.133600779 +0000 UTC m=+930.711098479" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.251799 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xmbnb"] Jan 05 22:06:44 crc kubenswrapper[4910]: E0105 22:06:44.252673 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d903f96-6c0d-45b7-a304-4d0e870fd392" containerName="registry-server" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.252694 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d903f96-6c0d-45b7-a304-4d0e870fd392" containerName="registry-server" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.252906 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d903f96-6c0d-45b7-a304-4d0e870fd392" containerName="registry-server" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.255314 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.270313 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmbnb"] Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.376161 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba69a28a-80f9-4fd2-a457-22685acaeeb1-catalog-content\") pod \"redhat-marketplace-xmbnb\" (UID: \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\") " pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.376386 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2nbj\" (UniqueName: \"kubernetes.io/projected/ba69a28a-80f9-4fd2-a457-22685acaeeb1-kube-api-access-h2nbj\") pod \"redhat-marketplace-xmbnb\" (UID: \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\") " pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.376497 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba69a28a-80f9-4fd2-a457-22685acaeeb1-utilities\") pod \"redhat-marketplace-xmbnb\" (UID: \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\") " pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.478228 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba69a28a-80f9-4fd2-a457-22685acaeeb1-catalog-content\") pod \"redhat-marketplace-xmbnb\" (UID: 
\"ba69a28a-80f9-4fd2-a457-22685acaeeb1\") " pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.478325 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2nbj\" (UniqueName: \"kubernetes.io/projected/ba69a28a-80f9-4fd2-a457-22685acaeeb1-kube-api-access-h2nbj\") pod \"redhat-marketplace-xmbnb\" (UID: \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\") " pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.478384 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba69a28a-80f9-4fd2-a457-22685acaeeb1-utilities\") pod \"redhat-marketplace-xmbnb\" (UID: \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\") " pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.478757 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba69a28a-80f9-4fd2-a457-22685acaeeb1-catalog-content\") pod \"redhat-marketplace-xmbnb\" (UID: \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\") " pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.479019 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba69a28a-80f9-4fd2-a457-22685acaeeb1-utilities\") pod \"redhat-marketplace-xmbnb\" (UID: \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\") " pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.517552 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2nbj\" (UniqueName: \"kubernetes.io/projected/ba69a28a-80f9-4fd2-a457-22685acaeeb1-kube-api-access-h2nbj\") pod \"redhat-marketplace-xmbnb\" (UID: \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\") " pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.576182 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:44 crc kubenswrapper[4910]: I0105 22:06:44.825938 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmbnb"] Jan 05 22:06:45 crc kubenswrapper[4910]: I0105 22:06:45.162310 4910 generic.go:334] "Generic (PLEG): container finished" podID="ba69a28a-80f9-4fd2-a457-22685acaeeb1" containerID="d9331293cb5014344133663967b29d7ad02ff3bdffc48a440f67563ebe2639f7" exitCode=0 Jan 05 22:06:45 crc kubenswrapper[4910]: I0105 22:06:45.162514 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmbnb" event={"ID":"ba69a28a-80f9-4fd2-a457-22685acaeeb1","Type":"ContainerDied","Data":"d9331293cb5014344133663967b29d7ad02ff3bdffc48a440f67563ebe2639f7"} Jan 05 22:06:45 crc kubenswrapper[4910]: I0105 22:06:45.162749 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmbnb" event={"ID":"ba69a28a-80f9-4fd2-a457-22685acaeeb1","Type":"ContainerStarted","Data":"7bb8567b4c5a6abdbeef789377f139318b52029b86b5d0e62ab061f4b5b69296"} Jan 05 22:06:46 crc kubenswrapper[4910]: I0105 22:06:46.176548 4910 generic.go:334] "Generic (PLEG): container finished" podID="ba69a28a-80f9-4fd2-a457-22685acaeeb1" containerID="c423e67e10bac9ce0bf994483ed6e0f70057968878b54c02449981b476e53b88" exitCode=0 Jan 05 22:06:46 crc kubenswrapper[4910]: I0105 22:06:46.176833 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmbnb" event={"ID":"ba69a28a-80f9-4fd2-a457-22685acaeeb1","Type":"ContainerDied","Data":"c423e67e10bac9ce0bf994483ed6e0f70057968878b54c02449981b476e53b88"} Jan 05 22:06:46 crc kubenswrapper[4910]: I0105 22:06:46.780966 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-gfvrk" Jan 05 22:06:46 crc kubenswrapper[4910]: I0105 22:06:46.781281 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-gfvrk" Jan 05 22:06:46 crc kubenswrapper[4910]: I0105 22:06:46.816634 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-gfvrk" Jan 05 22:06:47 crc kubenswrapper[4910]: I0105 22:06:47.185475 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmbnb" event={"ID":"ba69a28a-80f9-4fd2-a457-22685acaeeb1","Type":"ContainerStarted","Data":"2bd022c0b0dda6fbc448d8e3b935be9ff52db8725ef976492d0d15ba0406c7da"} Jan 05 22:06:47 crc kubenswrapper[4910]: I0105 22:06:47.212605 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xmbnb" podStartSLOduration=1.69558938 podStartE2EDuration="3.212583661s" podCreationTimestamp="2026-01-05 22:06:44 +0000 UTC" firstStartedPulling="2026-01-05 22:06:45.164174585 +0000 UTC m=+936.741672255" lastFinishedPulling="2026-01-05 22:06:46.681168836 +0000 UTC m=+938.258666536" observedRunningTime="2026-01-05 22:06:47.210026688 +0000 UTC m=+938.787524358" watchObservedRunningTime="2026-01-05 22:06:47.212583661 +0000 UTC m=+938.790081331" Jan 05 22:06:47 crc kubenswrapper[4910]: I0105 22:06:47.219589 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-gfvrk" Jan 05 22:06:53 crc kubenswrapper[4910]: I0105 22:06:53.892196 4910 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw"] Jan 05 22:06:53 crc kubenswrapper[4910]: I0105 22:06:53.894293 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" Jan 05 22:06:53 crc kubenswrapper[4910]: I0105 22:06:53.897384 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-htxpz" Jan 05 22:06:53 crc kubenswrapper[4910]: I0105 22:06:53.906708 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw"] Jan 05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.043468 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1097843-51dd-4524-958e-0c9322ec6600-util\") pod \"c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw\" (UID: \"d1097843-51dd-4524-958e-0c9322ec6600\") " pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" Jan 05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.043561 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngr64\" (UniqueName: \"kubernetes.io/projected/d1097843-51dd-4524-958e-0c9322ec6600-kube-api-access-ngr64\") pod \"c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw\" (UID: \"d1097843-51dd-4524-958e-0c9322ec6600\") " pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" Jan 05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.043656 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1097843-51dd-4524-958e-0c9322ec6600-bundle\") pod \"c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw\" (UID: \"d1097843-51dd-4524-958e-0c9322ec6600\") " pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" Jan 05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.145168 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1097843-51dd-4524-958e-0c9322ec6600-bundle\") pod \"c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw\" (UID: \"d1097843-51dd-4524-958e-0c9322ec6600\") " pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" Jan 05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.145231 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1097843-51dd-4524-958e-0c9322ec6600-util\") pod \"c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw\" (UID: \"d1097843-51dd-4524-958e-0c9322ec6600\") " pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" Jan 05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.145278 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngr64\" (UniqueName: \"kubernetes.io/projected/d1097843-51dd-4524-958e-0c9322ec6600-kube-api-access-ngr64\") pod \"c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw\" (UID: \"d1097843-51dd-4524-958e-0c9322ec6600\") " pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" Jan 
05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.145921 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1097843-51dd-4524-958e-0c9322ec6600-bundle\") pod \"c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw\" (UID: \"d1097843-51dd-4524-958e-0c9322ec6600\") " pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" Jan 05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.146080 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1097843-51dd-4524-958e-0c9322ec6600-util\") pod \"c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw\" (UID: \"d1097843-51dd-4524-958e-0c9322ec6600\") " pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" Jan 05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.176296 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngr64\" (UniqueName: \"kubernetes.io/projected/d1097843-51dd-4524-958e-0c9322ec6600-kube-api-access-ngr64\") pod \"c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw\" (UID: \"d1097843-51dd-4524-958e-0c9322ec6600\") " pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" Jan 05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.231320 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" Jan 05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.427855 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw"] Jan 05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.576665 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.576733 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:54 crc kubenswrapper[4910]: I0105 22:06:54.637435 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:55 crc kubenswrapper[4910]: I0105 22:06:55.252409 4910 generic.go:334] "Generic (PLEG): container finished" podID="d1097843-51dd-4524-958e-0c9322ec6600" containerID="8208397e2f4afc2fc43e44f5ad1f6530e76e66aa0d56088099b874839e9f5385" exitCode=0 Jan 05 22:06:55 crc kubenswrapper[4910]: I0105 22:06:55.252518 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" event={"ID":"d1097843-51dd-4524-958e-0c9322ec6600","Type":"ContainerDied","Data":"8208397e2f4afc2fc43e44f5ad1f6530e76e66aa0d56088099b874839e9f5385"} Jan 05 22:06:55 crc kubenswrapper[4910]: I0105 22:06:55.253165 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" event={"ID":"d1097843-51dd-4524-958e-0c9322ec6600","Type":"ContainerStarted","Data":"bee3c8244774cf4424309788ab77c717e39c0a406847a38533d9d4f6b0584832"} Jan 05 22:06:55 crc kubenswrapper[4910]: I0105 22:06:55.333153 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:56 crc 
kubenswrapper[4910]: I0105 22:06:56.259485 4910 generic.go:334] "Generic (PLEG): container finished" podID="d1097843-51dd-4524-958e-0c9322ec6600" containerID="dbd93ae8981698bc8c8f5d500763f7234aed7f158ebe3d41a0f86df759b4bdfb" exitCode=0 Jan 05 22:06:56 crc kubenswrapper[4910]: I0105 22:06:56.260755 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" event={"ID":"d1097843-51dd-4524-958e-0c9322ec6600","Type":"ContainerDied","Data":"dbd93ae8981698bc8c8f5d500763f7234aed7f158ebe3d41a0f86df759b4bdfb"} Jan 05 22:06:57 crc kubenswrapper[4910]: I0105 22:06:57.268758 4910 generic.go:334] "Generic (PLEG): container finished" podID="d1097843-51dd-4524-958e-0c9322ec6600" containerID="e5704a8a0875baf03142df8674ed8e2e18320e2eee355c0aab81adaab609295d" exitCode=0 Jan 05 22:06:57 crc kubenswrapper[4910]: I0105 22:06:57.268851 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" event={"ID":"d1097843-51dd-4524-958e-0c9322ec6600","Type":"ContainerDied","Data":"e5704a8a0875baf03142df8674ed8e2e18320e2eee355c0aab81adaab609295d"} Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.235022 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmbnb"] Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.235368 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xmbnb" podUID="ba69a28a-80f9-4fd2-a457-22685acaeeb1" containerName="registry-server" containerID="cri-o://2bd022c0b0dda6fbc448d8e3b935be9ff52db8725ef976492d0d15ba0406c7da" gracePeriod=2 Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.586416 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.668801 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.714039 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngr64\" (UniqueName: \"kubernetes.io/projected/d1097843-51dd-4524-958e-0c9322ec6600-kube-api-access-ngr64\") pod \"d1097843-51dd-4524-958e-0c9322ec6600\" (UID: \"d1097843-51dd-4524-958e-0c9322ec6600\") " Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.714169 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1097843-51dd-4524-958e-0c9322ec6600-util\") pod \"d1097843-51dd-4524-958e-0c9322ec6600\" (UID: \"d1097843-51dd-4524-958e-0c9322ec6600\") " Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.714199 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1097843-51dd-4524-958e-0c9322ec6600-bundle\") pod \"d1097843-51dd-4524-958e-0c9322ec6600\" (UID: \"d1097843-51dd-4524-958e-0c9322ec6600\") " Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.715160 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1097843-51dd-4524-958e-0c9322ec6600-bundle" (OuterVolumeSpecName: "bundle") pod "d1097843-51dd-4524-958e-0c9322ec6600" (UID: "d1097843-51dd-4524-958e-0c9322ec6600"). 
InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.725565 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1097843-51dd-4524-958e-0c9322ec6600-kube-api-access-ngr64" (OuterVolumeSpecName: "kube-api-access-ngr64") pod "d1097843-51dd-4524-958e-0c9322ec6600" (UID: "d1097843-51dd-4524-958e-0c9322ec6600"). InnerVolumeSpecName "kube-api-access-ngr64". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.740649 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1097843-51dd-4524-958e-0c9322ec6600-util" (OuterVolumeSpecName: "util") pod "d1097843-51dd-4524-958e-0c9322ec6600" (UID: "d1097843-51dd-4524-958e-0c9322ec6600"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.815898 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2nbj\" (UniqueName: \"kubernetes.io/projected/ba69a28a-80f9-4fd2-a457-22685acaeeb1-kube-api-access-h2nbj\") pod \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\" (UID: \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\") " Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.815968 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba69a28a-80f9-4fd2-a457-22685acaeeb1-utilities\") pod \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\" (UID: \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\") " Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.816081 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba69a28a-80f9-4fd2-a457-22685acaeeb1-catalog-content\") pod \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\" (UID: \"ba69a28a-80f9-4fd2-a457-22685acaeeb1\") " Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.816511 4910 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1097843-51dd-4524-958e-0c9322ec6600-util\") on node \"crc\" DevicePath \"\"" Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.816552 4910 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1097843-51dd-4524-958e-0c9322ec6600-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.816570 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngr64\" (UniqueName: \"kubernetes.io/projected/d1097843-51dd-4524-958e-0c9322ec6600-kube-api-access-ngr64\") on node \"crc\" DevicePath \"\"" Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.817251 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba69a28a-80f9-4fd2-a457-22685acaeeb1-utilities" (OuterVolumeSpecName: "utilities") pod "ba69a28a-80f9-4fd2-a457-22685acaeeb1" (UID: "ba69a28a-80f9-4fd2-a457-22685acaeeb1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.821317 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba69a28a-80f9-4fd2-a457-22685acaeeb1-kube-api-access-h2nbj" (OuterVolumeSpecName: "kube-api-access-h2nbj") pod "ba69a28a-80f9-4fd2-a457-22685acaeeb1" (UID: "ba69a28a-80f9-4fd2-a457-22685acaeeb1"). InnerVolumeSpecName "kube-api-access-h2nbj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.835510 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba69a28a-80f9-4fd2-a457-22685acaeeb1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ba69a28a-80f9-4fd2-a457-22685acaeeb1" (UID: "ba69a28a-80f9-4fd2-a457-22685acaeeb1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.917629 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2nbj\" (UniqueName: \"kubernetes.io/projected/ba69a28a-80f9-4fd2-a457-22685acaeeb1-kube-api-access-h2nbj\") on node \"crc\" DevicePath \"\"" Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.917671 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba69a28a-80f9-4fd2-a457-22685acaeeb1-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:06:58 crc kubenswrapper[4910]: I0105 22:06:58.917685 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba69a28a-80f9-4fd2-a457-22685acaeeb1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.286546 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" event={"ID":"d1097843-51dd-4524-958e-0c9322ec6600","Type":"ContainerDied","Data":"bee3c8244774cf4424309788ab77c717e39c0a406847a38533d9d4f6b0584832"} Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.287086 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bee3c8244774cf4424309788ab77c717e39c0a406847a38533d9d4f6b0584832" Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.286636 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw" Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.313056 4910 generic.go:334] "Generic (PLEG): container finished" podID="ba69a28a-80f9-4fd2-a457-22685acaeeb1" containerID="2bd022c0b0dda6fbc448d8e3b935be9ff52db8725ef976492d0d15ba0406c7da" exitCode=0 Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.313161 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmbnb" event={"ID":"ba69a28a-80f9-4fd2-a457-22685acaeeb1","Type":"ContainerDied","Data":"2bd022c0b0dda6fbc448d8e3b935be9ff52db8725ef976492d0d15ba0406c7da"} Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.313209 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmbnb" event={"ID":"ba69a28a-80f9-4fd2-a457-22685acaeeb1","Type":"ContainerDied","Data":"7bb8567b4c5a6abdbeef789377f139318b52029b86b5d0e62ab061f4b5b69296"} Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.313243 4910 scope.go:117] "RemoveContainer" containerID="2bd022c0b0dda6fbc448d8e3b935be9ff52db8725ef976492d0d15ba0406c7da" Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.313426 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmbnb" Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.337340 4910 scope.go:117] "RemoveContainer" containerID="c423e67e10bac9ce0bf994483ed6e0f70057968878b54c02449981b476e53b88" Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.353460 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmbnb"] Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.359310 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmbnb"] Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.374681 4910 scope.go:117] "RemoveContainer" containerID="d9331293cb5014344133663967b29d7ad02ff3bdffc48a440f67563ebe2639f7" Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.398410 4910 scope.go:117] "RemoveContainer" containerID="2bd022c0b0dda6fbc448d8e3b935be9ff52db8725ef976492d0d15ba0406c7da" Jan 05 22:06:59 crc kubenswrapper[4910]: E0105 22:06:59.398923 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2bd022c0b0dda6fbc448d8e3b935be9ff52db8725ef976492d0d15ba0406c7da\": container with ID starting with 2bd022c0b0dda6fbc448d8e3b935be9ff52db8725ef976492d0d15ba0406c7da not found: ID does not exist" containerID="2bd022c0b0dda6fbc448d8e3b935be9ff52db8725ef976492d0d15ba0406c7da" Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.398981 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bd022c0b0dda6fbc448d8e3b935be9ff52db8725ef976492d0d15ba0406c7da"} err="failed to get container status \"2bd022c0b0dda6fbc448d8e3b935be9ff52db8725ef976492d0d15ba0406c7da\": rpc error: code = NotFound desc = could not find container \"2bd022c0b0dda6fbc448d8e3b935be9ff52db8725ef976492d0d15ba0406c7da\": container with ID starting with 2bd022c0b0dda6fbc448d8e3b935be9ff52db8725ef976492d0d15ba0406c7da not found: ID does not exist" Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.399025 4910 scope.go:117] "RemoveContainer" containerID="c423e67e10bac9ce0bf994483ed6e0f70057968878b54c02449981b476e53b88" Jan 05 22:06:59 crc kubenswrapper[4910]: E0105 
22:06:59.399422 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c423e67e10bac9ce0bf994483ed6e0f70057968878b54c02449981b476e53b88\": container with ID starting with c423e67e10bac9ce0bf994483ed6e0f70057968878b54c02449981b476e53b88 not found: ID does not exist" containerID="c423e67e10bac9ce0bf994483ed6e0f70057968878b54c02449981b476e53b88" Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.399469 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c423e67e10bac9ce0bf994483ed6e0f70057968878b54c02449981b476e53b88"} err="failed to get container status \"c423e67e10bac9ce0bf994483ed6e0f70057968878b54c02449981b476e53b88\": rpc error: code = NotFound desc = could not find container \"c423e67e10bac9ce0bf994483ed6e0f70057968878b54c02449981b476e53b88\": container with ID starting with c423e67e10bac9ce0bf994483ed6e0f70057968878b54c02449981b476e53b88 not found: ID does not exist" Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.399508 4910 scope.go:117] "RemoveContainer" containerID="d9331293cb5014344133663967b29d7ad02ff3bdffc48a440f67563ebe2639f7" Jan 05 22:06:59 crc kubenswrapper[4910]: E0105 22:06:59.400312 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9331293cb5014344133663967b29d7ad02ff3bdffc48a440f67563ebe2639f7\": container with ID starting with d9331293cb5014344133663967b29d7ad02ff3bdffc48a440f67563ebe2639f7 not found: ID does not exist" containerID="d9331293cb5014344133663967b29d7ad02ff3bdffc48a440f67563ebe2639f7" Jan 05 22:06:59 crc kubenswrapper[4910]: I0105 22:06:59.400381 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9331293cb5014344133663967b29d7ad02ff3bdffc48a440f67563ebe2639f7"} err="failed to get container status \"d9331293cb5014344133663967b29d7ad02ff3bdffc48a440f67563ebe2639f7\": rpc error: code = NotFound desc = could not find container \"d9331293cb5014344133663967b29d7ad02ff3bdffc48a440f67563ebe2639f7\": container with ID starting with d9331293cb5014344133663967b29d7ad02ff3bdffc48a440f67563ebe2639f7 not found: ID does not exist" Jan 05 22:07:00 crc kubenswrapper[4910]: I0105 22:07:00.732037 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba69a28a-80f9-4fd2-a457-22685acaeeb1" path="/var/lib/kubelet/pods/ba69a28a-80f9-4fd2-a457-22685acaeeb1/volumes" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.200816 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5845bc5b8-9pfbx"] Jan 05 22:07:04 crc kubenswrapper[4910]: E0105 22:07:04.201473 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1097843-51dd-4524-958e-0c9322ec6600" containerName="util" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.201487 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1097843-51dd-4524-958e-0c9322ec6600" containerName="util" Jan 05 22:07:04 crc kubenswrapper[4910]: E0105 22:07:04.201496 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba69a28a-80f9-4fd2-a457-22685acaeeb1" containerName="registry-server" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.201502 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba69a28a-80f9-4fd2-a457-22685acaeeb1" containerName="registry-server" Jan 05 22:07:04 crc kubenswrapper[4910]: E0105 22:07:04.201509 4910 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="ba69a28a-80f9-4fd2-a457-22685acaeeb1" containerName="extract-content" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.201518 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba69a28a-80f9-4fd2-a457-22685acaeeb1" containerName="extract-content" Jan 05 22:07:04 crc kubenswrapper[4910]: E0105 22:07:04.201532 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1097843-51dd-4524-958e-0c9322ec6600" containerName="pull" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.201539 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1097843-51dd-4524-958e-0c9322ec6600" containerName="pull" Jan 05 22:07:04 crc kubenswrapper[4910]: E0105 22:07:04.201551 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba69a28a-80f9-4fd2-a457-22685acaeeb1" containerName="extract-utilities" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.201556 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba69a28a-80f9-4fd2-a457-22685acaeeb1" containerName="extract-utilities" Jan 05 22:07:04 crc kubenswrapper[4910]: E0105 22:07:04.201568 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1097843-51dd-4524-958e-0c9322ec6600" containerName="extract" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.201574 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1097843-51dd-4524-958e-0c9322ec6600" containerName="extract" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.201702 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba69a28a-80f9-4fd2-a457-22685acaeeb1" containerName="registry-server" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.201713 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1097843-51dd-4524-958e-0c9322ec6600" containerName="extract" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.202213 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5845bc5b8-9pfbx" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.211452 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-8lh8z" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.260936 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5845bc5b8-9pfbx"] Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.314062 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6vkl\" (UniqueName: \"kubernetes.io/projected/13c24329-71cc-45fb-93fe-94edbec20755-kube-api-access-x6vkl\") pod \"openstack-operator-controller-operator-5845bc5b8-9pfbx\" (UID: \"13c24329-71cc-45fb-93fe-94edbec20755\") " pod="openstack-operators/openstack-operator-controller-operator-5845bc5b8-9pfbx" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.416369 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6vkl\" (UniqueName: \"kubernetes.io/projected/13c24329-71cc-45fb-93fe-94edbec20755-kube-api-access-x6vkl\") pod \"openstack-operator-controller-operator-5845bc5b8-9pfbx\" (UID: \"13c24329-71cc-45fb-93fe-94edbec20755\") " pod="openstack-operators/openstack-operator-controller-operator-5845bc5b8-9pfbx" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.439192 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6vkl\" (UniqueName: \"kubernetes.io/projected/13c24329-71cc-45fb-93fe-94edbec20755-kube-api-access-x6vkl\") pod \"openstack-operator-controller-operator-5845bc5b8-9pfbx\" (UID: \"13c24329-71cc-45fb-93fe-94edbec20755\") " pod="openstack-operators/openstack-operator-controller-operator-5845bc5b8-9pfbx" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.523042 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5845bc5b8-9pfbx" Jan 05 22:07:04 crc kubenswrapper[4910]: I0105 22:07:04.996450 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5845bc5b8-9pfbx"] Jan 05 22:07:05 crc kubenswrapper[4910]: I0105 22:07:05.357367 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5845bc5b8-9pfbx" event={"ID":"13c24329-71cc-45fb-93fe-94edbec20755","Type":"ContainerStarted","Data":"f6379bdc880e2aa0206f1117d1e42fdd71e1ae9b7c59aca4f34eda3f7334c025"} Jan 05 22:07:06 crc kubenswrapper[4910]: I0105 22:07:06.841803 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kgjzr"] Jan 05 22:07:06 crc kubenswrapper[4910]: I0105 22:07:06.843185 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:06 crc kubenswrapper[4910]: I0105 22:07:06.858404 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kgjzr"]
Jan 05 22:07:06 crc kubenswrapper[4910]: I0105 22:07:06.966323 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/046dfbab-7fe9-4d13-8cd6-306794250fd2-catalog-content\") pod \"community-operators-kgjzr\" (UID: \"046dfbab-7fe9-4d13-8cd6-306794250fd2\") " pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:06 crc kubenswrapper[4910]: I0105 22:07:06.966413 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26l78\" (UniqueName: \"kubernetes.io/projected/046dfbab-7fe9-4d13-8cd6-306794250fd2-kube-api-access-26l78\") pod \"community-operators-kgjzr\" (UID: \"046dfbab-7fe9-4d13-8cd6-306794250fd2\") " pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:06 crc kubenswrapper[4910]: I0105 22:07:06.966457 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/046dfbab-7fe9-4d13-8cd6-306794250fd2-utilities\") pod \"community-operators-kgjzr\" (UID: \"046dfbab-7fe9-4d13-8cd6-306794250fd2\") " pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:07 crc kubenswrapper[4910]: I0105 22:07:07.068347 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/046dfbab-7fe9-4d13-8cd6-306794250fd2-utilities\") pod \"community-operators-kgjzr\" (UID: \"046dfbab-7fe9-4d13-8cd6-306794250fd2\") " pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:07 crc kubenswrapper[4910]: I0105 22:07:07.068733 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/046dfbab-7fe9-4d13-8cd6-306794250fd2-catalog-content\") pod \"community-operators-kgjzr\" (UID: \"046dfbab-7fe9-4d13-8cd6-306794250fd2\") " pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:07 crc kubenswrapper[4910]: I0105 22:07:07.068786 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26l78\" (UniqueName: \"kubernetes.io/projected/046dfbab-7fe9-4d13-8cd6-306794250fd2-kube-api-access-26l78\") pod \"community-operators-kgjzr\" (UID: \"046dfbab-7fe9-4d13-8cd6-306794250fd2\") " pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:07 crc kubenswrapper[4910]: I0105 22:07:07.069024 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/046dfbab-7fe9-4d13-8cd6-306794250fd2-utilities\") pod \"community-operators-kgjzr\" (UID: \"046dfbab-7fe9-4d13-8cd6-306794250fd2\") " pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:07 crc kubenswrapper[4910]: I0105 22:07:07.069271 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/046dfbab-7fe9-4d13-8cd6-306794250fd2-catalog-content\") pod \"community-operators-kgjzr\" (UID: \"046dfbab-7fe9-4d13-8cd6-306794250fd2\") " pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:07 crc kubenswrapper[4910]: I0105 22:07:07.110838 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26l78\" (UniqueName: \"kubernetes.io/projected/046dfbab-7fe9-4d13-8cd6-306794250fd2-kube-api-access-26l78\") pod \"community-operators-kgjzr\" (UID: \"046dfbab-7fe9-4d13-8cd6-306794250fd2\") " pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:07 crc kubenswrapper[4910]: I0105 22:07:07.167315 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:12 crc kubenswrapper[4910]: I0105 22:07:12.520056 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kgjzr"]
Jan 05 22:07:12 crc kubenswrapper[4910]: W0105 22:07:12.524569 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod046dfbab_7fe9_4d13_8cd6_306794250fd2.slice/crio-3fb22236f9d9f211de86c397196ee3446d309c669f5498157eddc50675173f07 WatchSource:0}: Error finding container 3fb22236f9d9f211de86c397196ee3446d309c669f5498157eddc50675173f07: Status 404 returned error can't find the container with id 3fb22236f9d9f211de86c397196ee3446d309c669f5498157eddc50675173f07
Jan 05 22:07:12 crc kubenswrapper[4910]: I0105 22:07:12.871350 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5845bc5b8-9pfbx" event={"ID":"13c24329-71cc-45fb-93fe-94edbec20755","Type":"ContainerStarted","Data":"ccd00a88ea0befe6cd0a400efa8776a8eed1cc6b942425eec709d68a89647d28"}
Jan 05 22:07:12 crc kubenswrapper[4910]: I0105 22:07:12.871956 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5845bc5b8-9pfbx"
Jan 05 22:07:12 crc kubenswrapper[4910]: I0105 22:07:12.873606 4910 generic.go:334] "Generic (PLEG): container finished" podID="046dfbab-7fe9-4d13-8cd6-306794250fd2" containerID="1fa1237986121eaeddd9783e462fadbb3c1bbff7ea550a1a71cb88d04f0cfd45" exitCode=0
Jan 05 22:07:12 crc kubenswrapper[4910]: I0105 22:07:12.873681 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kgjzr" event={"ID":"046dfbab-7fe9-4d13-8cd6-306794250fd2","Type":"ContainerDied","Data":"1fa1237986121eaeddd9783e462fadbb3c1bbff7ea550a1a71cb88d04f0cfd45"}
Jan 05 22:07:12 crc kubenswrapper[4910]: I0105 22:07:12.873755 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kgjzr" event={"ID":"046dfbab-7fe9-4d13-8cd6-306794250fd2","Type":"ContainerStarted","Data":"3fb22236f9d9f211de86c397196ee3446d309c669f5498157eddc50675173f07"}
Jan 05 22:07:12 crc kubenswrapper[4910]: I0105 22:07:12.918476 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-5845bc5b8-9pfbx" podStartSLOduration=1.6213899 podStartE2EDuration="8.918455434s" podCreationTimestamp="2026-01-05 22:07:04 +0000 UTC" firstStartedPulling="2026-01-05 22:07:04.978335653 +0000 UTC m=+956.555833313" lastFinishedPulling="2026-01-05 22:07:12.275401177 +0000 UTC m=+963.852898847" observedRunningTime="2026-01-05 22:07:12.913682066 +0000 UTC m=+964.491179756" watchObservedRunningTime="2026-01-05 22:07:12.918455434 +0000 UTC m=+964.495953094"
Jan 05 22:07:14 crc kubenswrapper[4910]: I0105 22:07:14.890418 4910 generic.go:334] "Generic (PLEG): container finished" podID="046dfbab-7fe9-4d13-8cd6-306794250fd2" containerID="0ec388915fb497c1a2a991ca5ef577e601c2ba100c59bb2154aabd8062af7cd6" exitCode=0
Jan 05 22:07:14 crc kubenswrapper[4910]: I0105 22:07:14.890516 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kgjzr" event={"ID":"046dfbab-7fe9-4d13-8cd6-306794250fd2","Type":"ContainerDied","Data":"0ec388915fb497c1a2a991ca5ef577e601c2ba100c59bb2154aabd8062af7cd6"}
Jan 05 22:07:15 crc kubenswrapper[4910]: I0105 22:07:15.899927 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kgjzr" event={"ID":"046dfbab-7fe9-4d13-8cd6-306794250fd2","Type":"ContainerStarted","Data":"30bcc5c4ab52ac2251d2d3d819dc132be5d04d44097f311e1e04204c0d344889"}
Jan 05 22:07:15 crc kubenswrapper[4910]: I0105 22:07:15.929966 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kgjzr" podStartSLOduration=7.454576097 podStartE2EDuration="9.929940578s" podCreationTimestamp="2026-01-05 22:07:06 +0000 UTC" firstStartedPulling="2026-01-05 22:07:12.87539107 +0000 UTC m=+964.452888740" lastFinishedPulling="2026-01-05 22:07:15.350755551 +0000 UTC m=+966.928253221" observedRunningTime="2026-01-05 22:07:15.926609815 +0000 UTC m=+967.504107525" watchObservedRunningTime="2026-01-05 22:07:15.929940578 +0000 UTC m=+967.507438268"
Jan 05 22:07:17 crc kubenswrapper[4910]: I0105 22:07:17.168297 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:17 crc kubenswrapper[4910]: I0105 22:07:17.168356 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:18 crc kubenswrapper[4910]: I0105 22:07:18.215084 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-kgjzr" podUID="046dfbab-7fe9-4d13-8cd6-306794250fd2" containerName="registry-server" probeResult="failure" output=<
Jan 05 22:07:18 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s
Jan 05 22:07:18 crc kubenswrapper[4910]: >
Jan 05 22:07:24 crc kubenswrapper[4910]: I0105 22:07:24.525477 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5845bc5b8-9pfbx"
Jan 05 22:07:27 crc kubenswrapper[4910]: I0105 22:07:27.234658 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:27 crc kubenswrapper[4910]: I0105 22:07:27.283010 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:27 crc kubenswrapper[4910]: I0105 22:07:27.467636 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kgjzr"]
Jan 05 22:07:29 crc kubenswrapper[4910]: I0105 22:07:29.005682 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kgjzr" podUID="046dfbab-7fe9-4d13-8cd6-306794250fd2" containerName="registry-server" containerID="cri-o://30bcc5c4ab52ac2251d2d3d819dc132be5d04d44097f311e1e04204c0d344889" gracePeriod=2
Jan 05 22:07:29 crc kubenswrapper[4910]: I0105 22:07:29.891013 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rq9v8"]
Jan 05 22:07:29 crc kubenswrapper[4910]: I0105 22:07:29.892337 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:29 crc kubenswrapper[4910]: I0105 22:07:29.901941 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rq9v8"]
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.011255 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-utilities\") pod \"certified-operators-rq9v8\" (UID: \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\") " pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.011318 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-488hq\" (UniqueName: \"kubernetes.io/projected/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-kube-api-access-488hq\") pod \"certified-operators-rq9v8\" (UID: \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\") " pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.011391 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-catalog-content\") pod \"certified-operators-rq9v8\" (UID: \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\") " pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.016308 4910 generic.go:334] "Generic (PLEG): container finished" podID="046dfbab-7fe9-4d13-8cd6-306794250fd2" containerID="30bcc5c4ab52ac2251d2d3d819dc132be5d04d44097f311e1e04204c0d344889" exitCode=0
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.016371 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kgjzr" event={"ID":"046dfbab-7fe9-4d13-8cd6-306794250fd2","Type":"ContainerDied","Data":"30bcc5c4ab52ac2251d2d3d819dc132be5d04d44097f311e1e04204c0d344889"}
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.113226 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-utilities\") pod \"certified-operators-rq9v8\" (UID: \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\") " pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.113286 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-488hq\" (UniqueName: \"kubernetes.io/projected/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-kube-api-access-488hq\") pod \"certified-operators-rq9v8\" (UID: \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\") " pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.113330 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-catalog-content\") pod \"certified-operators-rq9v8\" (UID: \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\") " pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.114068 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-utilities\") pod \"certified-operators-rq9v8\" (UID: \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\") " pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.114099 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-catalog-content\") pod \"certified-operators-rq9v8\" (UID: \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\") " pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.142095 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-488hq\" (UniqueName: \"kubernetes.io/projected/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-kube-api-access-488hq\") pod \"certified-operators-rq9v8\" (UID: \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\") " pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.256093 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.668364 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.816991 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rq9v8"]
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.824053 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/046dfbab-7fe9-4d13-8cd6-306794250fd2-utilities\") pod \"046dfbab-7fe9-4d13-8cd6-306794250fd2\" (UID: \"046dfbab-7fe9-4d13-8cd6-306794250fd2\") "
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.824202 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/046dfbab-7fe9-4d13-8cd6-306794250fd2-catalog-content\") pod \"046dfbab-7fe9-4d13-8cd6-306794250fd2\" (UID: \"046dfbab-7fe9-4d13-8cd6-306794250fd2\") "
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.824233 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26l78\" (UniqueName: \"kubernetes.io/projected/046dfbab-7fe9-4d13-8cd6-306794250fd2-kube-api-access-26l78\") pod \"046dfbab-7fe9-4d13-8cd6-306794250fd2\" (UID: \"046dfbab-7fe9-4d13-8cd6-306794250fd2\") "
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.825207 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/046dfbab-7fe9-4d13-8cd6-306794250fd2-utilities" (OuterVolumeSpecName: "utilities") pod "046dfbab-7fe9-4d13-8cd6-306794250fd2" (UID: "046dfbab-7fe9-4d13-8cd6-306794250fd2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.826051 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/046dfbab-7fe9-4d13-8cd6-306794250fd2-utilities\") on node \"crc\" DevicePath \"\""
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.830302 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/046dfbab-7fe9-4d13-8cd6-306794250fd2-kube-api-access-26l78" (OuterVolumeSpecName: "kube-api-access-26l78") pod "046dfbab-7fe9-4d13-8cd6-306794250fd2" (UID: "046dfbab-7fe9-4d13-8cd6-306794250fd2"). InnerVolumeSpecName "kube-api-access-26l78". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.872762 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/046dfbab-7fe9-4d13-8cd6-306794250fd2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "046dfbab-7fe9-4d13-8cd6-306794250fd2" (UID: "046dfbab-7fe9-4d13-8cd6-306794250fd2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.928173 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/046dfbab-7fe9-4d13-8cd6-306794250fd2-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 05 22:07:30 crc kubenswrapper[4910]: I0105 22:07:30.928231 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26l78\" (UniqueName: \"kubernetes.io/projected/046dfbab-7fe9-4d13-8cd6-306794250fd2-kube-api-access-26l78\") on node \"crc\" DevicePath \"\""
Jan 05 22:07:31 crc kubenswrapper[4910]: I0105 22:07:31.025260 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rq9v8" event={"ID":"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3","Type":"ContainerStarted","Data":"b678afdcfd5dfb8993fba8198b2546c4595cb8b966fa6449309cc5a24c03f732"}
Jan 05 22:07:31 crc kubenswrapper[4910]: I0105 22:07:31.027795 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kgjzr" event={"ID":"046dfbab-7fe9-4d13-8cd6-306794250fd2","Type":"ContainerDied","Data":"3fb22236f9d9f211de86c397196ee3446d309c669f5498157eddc50675173f07"}
Jan 05 22:07:31 crc kubenswrapper[4910]: I0105 22:07:31.027876 4910 scope.go:117] "RemoveContainer" containerID="30bcc5c4ab52ac2251d2d3d819dc132be5d04d44097f311e1e04204c0d344889"
Jan 05 22:07:31 crc kubenswrapper[4910]: I0105 22:07:31.027878 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kgjzr"
Jan 05 22:07:31 crc kubenswrapper[4910]: I0105 22:07:31.046833 4910 scope.go:117] "RemoveContainer" containerID="0ec388915fb497c1a2a991ca5ef577e601c2ba100c59bb2154aabd8062af7cd6"
Jan 05 22:07:31 crc kubenswrapper[4910]: I0105 22:07:31.061591 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kgjzr"]
Jan 05 22:07:31 crc kubenswrapper[4910]: I0105 22:07:31.066905 4910 scope.go:117] "RemoveContainer" containerID="1fa1237986121eaeddd9783e462fadbb3c1bbff7ea550a1a71cb88d04f0cfd45"
Jan 05 22:07:31 crc kubenswrapper[4910]: I0105 22:07:31.066940 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kgjzr"]
Jan 05 22:07:32 crc kubenswrapper[4910]: I0105 22:07:32.037204 4910 generic.go:334] "Generic (PLEG): container finished" podID="5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" containerID="7fb14b0b0152e0d4343bd402ac67e9f89df6c916fc8d96dfeedcf5cfd1c8a46f" exitCode=0
Jan 05 22:07:32 crc kubenswrapper[4910]: I0105 22:07:32.037273 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rq9v8" event={"ID":"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3","Type":"ContainerDied","Data":"7fb14b0b0152e0d4343bd402ac67e9f89df6c916fc8d96dfeedcf5cfd1c8a46f"}
Jan 05 22:07:32 crc kubenswrapper[4910]: I0105 22:07:32.729681 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="046dfbab-7fe9-4d13-8cd6-306794250fd2" path="/var/lib/kubelet/pods/046dfbab-7fe9-4d13-8cd6-306794250fd2/volumes"
Jan 05 22:07:33 crc kubenswrapper[4910]: I0105 22:07:33.050005 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rq9v8" event={"ID":"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3","Type":"ContainerStarted","Data":"e195c6db5ce9847bc40517526a2f12053325fcb882427dacd323dcdc9a4bf8b8"}
Jan 05 22:07:34 crc kubenswrapper[4910]: I0105 22:07:34.065597 4910 generic.go:334] "Generic (PLEG): container finished" podID="5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" containerID="e195c6db5ce9847bc40517526a2f12053325fcb882427dacd323dcdc9a4bf8b8" exitCode=0
Jan 05 22:07:34 crc kubenswrapper[4910]: I0105 22:07:34.065687 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rq9v8" event={"ID":"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3","Type":"ContainerDied","Data":"e195c6db5ce9847bc40517526a2f12053325fcb882427dacd323dcdc9a4bf8b8"}
Jan 05 22:07:35 crc kubenswrapper[4910]: I0105 22:07:35.078414 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rq9v8" event={"ID":"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3","Type":"ContainerStarted","Data":"5d9bb2eb41dda024f01bbe4b265b179950d6b4d9222fd688cb695c2ccfa5502f"}
Jan 05 22:07:35 crc kubenswrapper[4910]: I0105 22:07:35.106841 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rq9v8" podStartSLOduration=3.387521003 podStartE2EDuration="6.106815049s" podCreationTimestamp="2026-01-05 22:07:29 +0000 UTC" firstStartedPulling="2026-01-05 22:07:32.040231632 +0000 UTC m=+983.617729302" lastFinishedPulling="2026-01-05 22:07:34.759525648 +0000 UTC m=+986.337023348" observedRunningTime="2026-01-05 22:07:35.100378952 +0000 UTC m=+986.677876642" watchObservedRunningTime="2026-01-05 22:07:35.106815049 +0000 UTC m=+986.684312729"
Jan 05 22:07:40 crc kubenswrapper[4910]: I0105 22:07:40.256543 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:40 crc kubenswrapper[4910]: I0105 22:07:40.257177 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:40 crc kubenswrapper[4910]: I0105 22:07:40.318087 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:40 crc kubenswrapper[4910]: I0105 22:07:40.953073 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 05 22:07:40 crc kubenswrapper[4910]: I0105 22:07:40.953586 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 05 22:07:41 crc kubenswrapper[4910]: I0105 22:07:41.172774 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:41 crc kubenswrapper[4910]: I0105 22:07:41.269413 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rq9v8"]
Jan 05 22:07:43 crc kubenswrapper[4910]: I0105 22:07:43.133707 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rq9v8" podUID="5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" containerName="registry-server" containerID="cri-o://5d9bb2eb41dda024f01bbe4b265b179950d6b4d9222fd688cb695c2ccfa5502f" gracePeriod=2
Jan 05 22:07:44 crc kubenswrapper[4910]: I0105 22:07:44.645732 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:44 crc kubenswrapper[4910]: I0105 22:07:44.766935 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-utilities\") pod \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\" (UID: \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\") "
Jan 05 22:07:44 crc kubenswrapper[4910]: I0105 22:07:44.767004 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-488hq\" (UniqueName: \"kubernetes.io/projected/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-kube-api-access-488hq\") pod \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\" (UID: \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\") "
Jan 05 22:07:44 crc kubenswrapper[4910]: I0105 22:07:44.767764 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-utilities" (OuterVolumeSpecName: "utilities") pod "5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" (UID: "5cb07c90-ffc0-481a-87a1-e0e87de5c6e3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:07:44 crc kubenswrapper[4910]: I0105 22:07:44.767037 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-catalog-content\") pod \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\" (UID: \"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3\") "
Jan 05 22:07:44 crc kubenswrapper[4910]: I0105 22:07:44.768353 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-utilities\") on node \"crc\" DevicePath \"\""
Jan 05 22:07:44 crc kubenswrapper[4910]: I0105 22:07:44.774770 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-kube-api-access-488hq" (OuterVolumeSpecName: "kube-api-access-488hq") pod "5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" (UID: "5cb07c90-ffc0-481a-87a1-e0e87de5c6e3"). InnerVolumeSpecName "kube-api-access-488hq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:07:44 crc kubenswrapper[4910]: I0105 22:07:44.812766 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" (UID: "5cb07c90-ffc0-481a-87a1-e0e87de5c6e3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:07:44 crc kubenswrapper[4910]: I0105 22:07:44.870304 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-488hq\" (UniqueName: \"kubernetes.io/projected/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-kube-api-access-488hq\") on node \"crc\" DevicePath \"\""
Jan 05 22:07:44 crc kubenswrapper[4910]: I0105 22:07:44.870644 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.154113 4910 generic.go:334] "Generic (PLEG): container finished" podID="5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" containerID="5d9bb2eb41dda024f01bbe4b265b179950d6b4d9222fd688cb695c2ccfa5502f" exitCode=0
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.154186 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rq9v8" event={"ID":"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3","Type":"ContainerDied","Data":"5d9bb2eb41dda024f01bbe4b265b179950d6b4d9222fd688cb695c2ccfa5502f"}
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.154229 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rq9v8" event={"ID":"5cb07c90-ffc0-481a-87a1-e0e87de5c6e3","Type":"ContainerDied","Data":"b678afdcfd5dfb8993fba8198b2546c4595cb8b966fa6449309cc5a24c03f732"}
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.154248 4910 scope.go:117] "RemoveContainer" containerID="5d9bb2eb41dda024f01bbe4b265b179950d6b4d9222fd688cb695c2ccfa5502f"
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.154294 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rq9v8"
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.178342 4910 scope.go:117] "RemoveContainer" containerID="e195c6db5ce9847bc40517526a2f12053325fcb882427dacd323dcdc9a4bf8b8"
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.204737 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rq9v8"]
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.211937 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rq9v8"]
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.222245 4910 scope.go:117] "RemoveContainer" containerID="7fb14b0b0152e0d4343bd402ac67e9f89df6c916fc8d96dfeedcf5cfd1c8a46f"
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.243612 4910 scope.go:117] "RemoveContainer" containerID="5d9bb2eb41dda024f01bbe4b265b179950d6b4d9222fd688cb695c2ccfa5502f"
Jan 05 22:07:45 crc kubenswrapper[4910]: E0105 22:07:45.244156 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d9bb2eb41dda024f01bbe4b265b179950d6b4d9222fd688cb695c2ccfa5502f\": container with ID starting with 5d9bb2eb41dda024f01bbe4b265b179950d6b4d9222fd688cb695c2ccfa5502f not found: ID does not exist" containerID="5d9bb2eb41dda024f01bbe4b265b179950d6b4d9222fd688cb695c2ccfa5502f"
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.244222 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d9bb2eb41dda024f01bbe4b265b179950d6b4d9222fd688cb695c2ccfa5502f"} err="failed to get container status \"5d9bb2eb41dda024f01bbe4b265b179950d6b4d9222fd688cb695c2ccfa5502f\": rpc error: code = NotFound desc = could not find container \"5d9bb2eb41dda024f01bbe4b265b179950d6b4d9222fd688cb695c2ccfa5502f\": container with ID starting with 5d9bb2eb41dda024f01bbe4b265b179950d6b4d9222fd688cb695c2ccfa5502f not found: ID does not exist"
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.244262 4910 scope.go:117] "RemoveContainer" containerID="e195c6db5ce9847bc40517526a2f12053325fcb882427dacd323dcdc9a4bf8b8"
Jan 05 22:07:45 crc kubenswrapper[4910]: E0105 22:07:45.244675 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e195c6db5ce9847bc40517526a2f12053325fcb882427dacd323dcdc9a4bf8b8\": container with ID starting with e195c6db5ce9847bc40517526a2f12053325fcb882427dacd323dcdc9a4bf8b8 not found: ID does not exist" containerID="e195c6db5ce9847bc40517526a2f12053325fcb882427dacd323dcdc9a4bf8b8"
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.244726 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e195c6db5ce9847bc40517526a2f12053325fcb882427dacd323dcdc9a4bf8b8"} err="failed to get container status \"e195c6db5ce9847bc40517526a2f12053325fcb882427dacd323dcdc9a4bf8b8\": rpc error: code = NotFound desc = could not find container \"e195c6db5ce9847bc40517526a2f12053325fcb882427dacd323dcdc9a4bf8b8\": container with ID starting with e195c6db5ce9847bc40517526a2f12053325fcb882427dacd323dcdc9a4bf8b8 not found: ID does not exist"
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.244762 4910 scope.go:117] "RemoveContainer" containerID="7fb14b0b0152e0d4343bd402ac67e9f89df6c916fc8d96dfeedcf5cfd1c8a46f"
Jan 05 22:07:45 crc kubenswrapper[4910]: E0105 22:07:45.245327 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7fb14b0b0152e0d4343bd402ac67e9f89df6c916fc8d96dfeedcf5cfd1c8a46f\": container with ID starting with 7fb14b0b0152e0d4343bd402ac67e9f89df6c916fc8d96dfeedcf5cfd1c8a46f not found: ID does not exist" containerID="7fb14b0b0152e0d4343bd402ac67e9f89df6c916fc8d96dfeedcf5cfd1c8a46f"
Jan 05 22:07:45 crc kubenswrapper[4910]: I0105 22:07:45.245371 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7fb14b0b0152e0d4343bd402ac67e9f89df6c916fc8d96dfeedcf5cfd1c8a46f"} err="failed to get container status \"7fb14b0b0152e0d4343bd402ac67e9f89df6c916fc8d96dfeedcf5cfd1c8a46f\": rpc error: code = NotFound desc = could not find container \"7fb14b0b0152e0d4343bd402ac67e9f89df6c916fc8d96dfeedcf5cfd1c8a46f\": container with ID starting with 7fb14b0b0152e0d4343bd402ac67e9f89df6c916fc8d96dfeedcf5cfd1c8a46f not found: ID does not exist"
Jan 05 22:07:46 crc kubenswrapper[4910]: I0105 22:07:46.735035 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" path="/var/lib/kubelet/pods/5cb07c90-ffc0-481a-87a1-e0e87de5c6e3/volumes"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.646081 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-f6f74d6db-t7xl6"]
Jan 05 22:07:53 crc kubenswrapper[4910]: E0105 22:07:53.648968 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" containerName="registry-server"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.649098 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" containerName="registry-server"
Jan 05 22:07:53 crc kubenswrapper[4910]: E0105 22:07:53.649257 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="046dfbab-7fe9-4d13-8cd6-306794250fd2" containerName="registry-server"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.649373 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="046dfbab-7fe9-4d13-8cd6-306794250fd2" containerName="registry-server"
Jan 05 22:07:53 crc kubenswrapper[4910]: E0105 22:07:53.649481 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="046dfbab-7fe9-4d13-8cd6-306794250fd2" containerName="extract-content"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.649583 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="046dfbab-7fe9-4d13-8cd6-306794250fd2" containerName="extract-content"
Jan 05 22:07:53 crc kubenswrapper[4910]: E0105 22:07:53.649704 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" containerName="extract-content"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.649800 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" containerName="extract-content"
Jan 05 22:07:53 crc kubenswrapper[4910]: E0105 22:07:53.649927 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" containerName="extract-utilities"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.650026 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" containerName="extract-utilities"
Jan 05 22:07:53 crc kubenswrapper[4910]: E0105 22:07:53.650427 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="046dfbab-7fe9-4d13-8cd6-306794250fd2" containerName="extract-utilities"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.650605 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="046dfbab-7fe9-4d13-8cd6-306794250fd2" containerName="extract-utilities"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.651015 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5cb07c90-ffc0-481a-87a1-e0e87de5c6e3" containerName="registry-server"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.651162 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="046dfbab-7fe9-4d13-8cd6-306794250fd2" containerName="registry-server"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.652173 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-t7xl6"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.656697 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-tlxbt"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.657831 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-78979fc445-v4jmq"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.658999 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-v4jmq"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.661581 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-lrzxf"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.687291 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-f6f74d6db-t7xl6"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.692881 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-78979fc445-v4jmq"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.723936 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-66f8b87655-d6q5k"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.724757 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-d6q5k"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.735706 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-lvqkr"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.743158 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-66f8b87655-d6q5k"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.765028 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.776217 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.781705 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-czgvp"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.783706 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-658dd65b86-hjhdr"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.791253 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.791410 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-658dd65b86-hjhdr"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.797458 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-wsdcf"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.812249 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-658dd65b86-hjhdr"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.813262 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j25fm\" (UniqueName: \"kubernetes.io/projected/11188457-aabb-45d0-85d5-3ae1fc7a085f-kube-api-access-j25fm\") pod \"barbican-operator-controller-manager-f6f74d6db-t7xl6\" (UID: \"11188457-aabb-45d0-85d5-3ae1fc7a085f\") " pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-t7xl6"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.813351 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pt6vk\" (UniqueName: \"kubernetes.io/projected/10a1f9a3-7d22-4e02-8b5b-4ae1374194cf-kube-api-access-pt6vk\") pod \"cinder-operator-controller-manager-78979fc445-v4jmq\" (UID: \"10a1f9a3-7d22-4e02-8b5b-4ae1374194cf\") " pod="openstack-operators/cinder-operator-controller-manager-78979fc445-v4jmq"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.813393 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqgb4\" (UniqueName: \"kubernetes.io/projected/af80b86a-ae5b-4e42-b4c7-9fc033d4fd26-kube-api-access-wqgb4\") pod \"designate-operator-controller-manager-66f8b87655-d6q5k\" (UID: \"af80b86a-ae5b-4e42-b4c7-9fc033d4fd26\") " pod="openstack-operators/designate-operator-controller-manager-66f8b87655-d6q5k"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.859229 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-f99f54bc8-hfxg8"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.860726 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-hfxg8"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.865201 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-x8nfd"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.868212 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.869512 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.884395 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.884897 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-bhz9k"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.890920 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.891916 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.902925 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-krqq4"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.903194 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-f99f54bc8-hfxg8"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.915152 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pt6vk\" (UniqueName: \"kubernetes.io/projected/10a1f9a3-7d22-4e02-8b5b-4ae1374194cf-kube-api-access-pt6vk\") pod \"cinder-operator-controller-manager-78979fc445-v4jmq\" (UID: \"10a1f9a3-7d22-4e02-8b5b-4ae1374194cf\") " pod="openstack-operators/cinder-operator-controller-manager-78979fc445-v4jmq"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.915207 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nw5b\" (UniqueName: \"kubernetes.io/projected/69c2eea7-1ac0-42b2-b1b7-d4ffaba1a9b7-kube-api-access-5nw5b\") pod \"heat-operator-controller-manager-658dd65b86-hjhdr\" (UID: \"69c2eea7-1ac0-42b2-b1b7-d4ffaba1a9b7\") " pod="openstack-operators/heat-operator-controller-manager-658dd65b86-hjhdr"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.915243 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqgb4\" (UniqueName: \"kubernetes.io/projected/af80b86a-ae5b-4e42-b4c7-9fc033d4fd26-kube-api-access-wqgb4\") pod \"designate-operator-controller-manager-66f8b87655-d6q5k\" (UID: \"af80b86a-ae5b-4e42-b4c7-9fc033d4fd26\") " pod="openstack-operators/designate-operator-controller-manager-66f8b87655-d6q5k"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.915261 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ttcf\" (UniqueName: \"kubernetes.io/projected/c3362390-1824-422e-8a9b-dfcfc1098cfd-kube-api-access-5ttcf\") pod \"glance-operator-controller-manager-7b549fc966-67s8n\" (UID: \"c3362390-1824-422e-8a9b-dfcfc1098cfd\") " pod="openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.915339 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j25fm\" (UniqueName: \"kubernetes.io/projected/11188457-aabb-45d0-85d5-3ae1fc7a085f-kube-api-access-j25fm\") pod \"barbican-operator-controller-manager-f6f74d6db-t7xl6\" (UID: \"11188457-aabb-45d0-85d5-3ae1fc7a085f\") " pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-t7xl6"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.917740 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.939222 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.953312 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pt6vk\" (UniqueName: \"kubernetes.io/projected/10a1f9a3-7d22-4e02-8b5b-4ae1374194cf-kube-api-access-pt6vk\") pod \"cinder-operator-controller-manager-78979fc445-v4jmq\" (UID: \"10a1f9a3-7d22-4e02-8b5b-4ae1374194cf\") " pod="openstack-operators/cinder-operator-controller-manager-78979fc445-v4jmq"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.962286 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqgb4\" (UniqueName: \"kubernetes.io/projected/af80b86a-ae5b-4e42-b4c7-9fc033d4fd26-kube-api-access-wqgb4\") pod \"designate-operator-controller-manager-66f8b87655-d6q5k\" (UID: \"af80b86a-ae5b-4e42-b4c7-9fc033d4fd26\") " pod="openstack-operators/designate-operator-controller-manager-66f8b87655-d6q5k"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.970287 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.971232 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.979572 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-p266x"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.982898 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j25fm\" (UniqueName: \"kubernetes.io/projected/11188457-aabb-45d0-85d5-3ae1fc7a085f-kube-api-access-j25fm\") pod \"barbican-operator-controller-manager-f6f74d6db-t7xl6\" (UID: \"11188457-aabb-45d0-85d5-3ae1fc7a085f\") " pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-t7xl6"
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.990450 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl"]
Jan 05 22:07:53 crc kubenswrapper[4910]: I0105 22:07:53.995204 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-v4jmq"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.008686 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv"]
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.009760 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.016564 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-bsgsn"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.017069 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ck4dm\" (UniqueName: \"kubernetes.io/projected/6bfbc4ba-ffa7-42df-9123-945bbe818352-kube-api-access-ck4dm\") pod \"ironic-operator-controller-manager-f99f54bc8-hfxg8\" (UID: \"6bfbc4ba-ffa7-42df-9123-945bbe818352\") " pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-hfxg8"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.017161 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nw5b\" (UniqueName: \"kubernetes.io/projected/69c2eea7-1ac0-42b2-b1b7-d4ffaba1a9b7-kube-api-access-5nw5b\") pod \"heat-operator-controller-manager-658dd65b86-hjhdr\" (UID: \"69c2eea7-1ac0-42b2-b1b7-d4ffaba1a9b7\") " pod="openstack-operators/heat-operator-controller-manager-658dd65b86-hjhdr"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.017184 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whdjh\" (UniqueName: \"kubernetes.io/projected/98338ebd-fd6a-49de-a042-edb94e115570-kube-api-access-whdjh\") pod \"horizon-operator-controller-manager-7f5ddd8d7b-ht2cs\" (UID: \"98338ebd-fd6a-49de-a042-edb94e115570\") " pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.017211 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ttcf\" (UniqueName: \"kubernetes.io/projected/c3362390-1824-422e-8a9b-dfcfc1098cfd-kube-api-access-5ttcf\") pod \"glance-operator-controller-manager-7b549fc966-67s8n\" (UID: \"c3362390-1824-422e-8a9b-dfcfc1098cfd\") " pod="openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.017255 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert\") pod \"infra-operator-controller-manager-6d99759cf-89lvl\" (UID: \"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.017291 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kz5wd\" (UniqueName: \"kubernetes.io/projected/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-kube-api-access-kz5wd\") pod \"infra-operator-controller-manager-6d99759cf-89lvl\" (UID: \"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.032003 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv"]
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.040232 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr"]
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.040708 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ttcf\" (UniqueName: \"kubernetes.io/projected/c3362390-1824-422e-8a9b-dfcfc1098cfd-kube-api-access-5ttcf\") pod \"glance-operator-controller-manager-7b549fc966-67s8n\" (UID: \"c3362390-1824-422e-8a9b-dfcfc1098cfd\") " pod="openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.041656 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.045078 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb"]
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.046006 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.046705 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-zvsct"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.047855 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-d6q5k"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.056153 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-gw4t8"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.059899 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg"]
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.060984 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.064078 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb"]
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.071819 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nw5b\" (UniqueName: \"kubernetes.io/projected/69c2eea7-1ac0-42b2-b1b7-d4ffaba1a9b7-kube-api-access-5nw5b\") pod \"heat-operator-controller-manager-658dd65b86-hjhdr\" (UID: \"69c2eea7-1ac0-42b2-b1b7-d4ffaba1a9b7\") " pod="openstack-operators/heat-operator-controller-manager-658dd65b86-hjhdr"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.072383 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr"]
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.072676 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-5kwk9"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.084040 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz"]
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.085232 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.093262 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg"]
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.120624 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-g2tmj"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.124109 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.127669 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-658dd65b86-hjhdr"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.154182 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8c29l\" (UniqueName: \"kubernetes.io/projected/84b7e891-c710-431d-81e3-3d0fef0bf08e-kube-api-access-8c29l\") pod \"keystone-operator-controller-manager-568985c78-wk9gv\" (UID: \"84b7e891-c710-431d-81e3-3d0fef0bf08e\") " pod="openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.154378 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kz5wd\" (UniqueName: \"kubernetes.io/projected/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-kube-api-access-kz5wd\") pod \"infra-operator-controller-manager-6d99759cf-89lvl\" (UID: \"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.154480 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ck4dm\" (UniqueName: \"kubernetes.io/projected/6bfbc4ba-ffa7-42df-9123-945bbe818352-kube-api-access-ck4dm\") pod \"ironic-operator-controller-manager-f99f54bc8-hfxg8\" (UID: \"6bfbc4ba-ffa7-42df-9123-945bbe818352\") " pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-hfxg8"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.178860 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whdjh\" (UniqueName: \"kubernetes.io/projected/98338ebd-fd6a-49de-a042-edb94e115570-kube-api-access-whdjh\") pod \"horizon-operator-controller-manager-7f5ddd8d7b-ht2cs\" (UID: \"98338ebd-fd6a-49de-a042-edb94e115570\") " pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.178964 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smtjc\" (UniqueName: \"kubernetes.io/projected/aa22d9f9-f865-495a-a0f0-a8aa424051aa-kube-api-access-smtjc\") pod \"nova-operator-controller-manager-5fbbf8b6cc-2qtrb\" (UID: \"aa22d9f9-f865-495a-a0f0-a8aa424051aa\") " pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.179012 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mr6br\" (UniqueName: \"kubernetes.io/projected/c4b3f034-ce14-4081-a47c-feac32565388-kube-api-access-mr6br\") pod \"mariadb-operator-controller-manager-7b88bfc995-fvgtr\" (UID: \"c4b3f034-ce14-4081-a47c-feac32565388\") " pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.179055 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljk2m\" (UniqueName: \"kubernetes.io/projected/ab5bdb68-d0c3-436b-a7d5-36fe8be5bd90-kube-api-access-ljk2m\") pod \"manila-operator-controller-manager-598945d5b8-n7nvl\" (UID: \"ab5bdb68-d0c3-436b-a7d5-36fe8be5bd90\") " pod="openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.179096 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert\") pod \"infra-operator-controller-manager-6d99759cf-89lvl\" (UID: \"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl"
Jan 05 22:07:54 crc kubenswrapper[4910]: E0105 22:07:54.179310 4910 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Jan 05 22:07:54 crc kubenswrapper[4910]: E0105 22:07:54.179386 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert podName:b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed nodeName:}" failed. No retries permitted until 2026-01-05 22:07:54.67935905 +0000 UTC m=+1006.256856720 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert") pod "infra-operator-controller-manager-6d99759cf-89lvl" (UID: "b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed") : secret "infra-operator-webhook-server-cert" not found
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.217610 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kz5wd\" (UniqueName: \"kubernetes.io/projected/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-kube-api-access-kz5wd\") pod \"infra-operator-controller-manager-6d99759cf-89lvl\" (UID: \"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.221916 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whdjh\" (UniqueName: \"kubernetes.io/projected/98338ebd-fd6a-49de-a042-edb94e115570-kube-api-access-whdjh\") pod \"horizon-operator-controller-manager-7f5ddd8d7b-ht2cs\" (UID: \"98338ebd-fd6a-49de-a042-edb94e115570\") " pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.224183 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ck4dm\" (UniqueName: \"kubernetes.io/projected/6bfbc4ba-ffa7-42df-9123-945bbe818352-kube-api-access-ck4dm\") pod \"ironic-operator-controller-manager-f99f54bc8-hfxg8\" (UID: \"6bfbc4ba-ffa7-42df-9123-945bbe818352\") " pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-hfxg8"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.228976 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.286493 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-t7xl6"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.288556 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnszh\" (UniqueName: \"kubernetes.io/projected/ac3384d0-7a86-4e80-94a7-e0ff9bd32143-kube-api-access-xnszh\") pod \"octavia-operator-controller-manager-68c649d9d-p4tsz\" (UID: \"ac3384d0-7a86-4e80-94a7-e0ff9bd32143\") " pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.288618 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvm2l\" (UniqueName: \"kubernetes.io/projected/dee3cccb-8251-41b8-82ec-a696001f803d-kube-api-access-bvm2l\") pod \"neutron-operator-controller-manager-7cd87b778f-trrvg\" (UID: \"dee3cccb-8251-41b8-82ec-a696001f803d\") " pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.288647 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smtjc\" (UniqueName: \"kubernetes.io/projected/aa22d9f9-f865-495a-a0f0-a8aa424051aa-kube-api-access-smtjc\") pod \"nova-operator-controller-manager-5fbbf8b6cc-2qtrb\" (UID: \"aa22d9f9-f865-495a-a0f0-a8aa424051aa\") " pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.288674 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mr6br\" (UniqueName: \"kubernetes.io/projected/c4b3f034-ce14-4081-a47c-feac32565388-kube-api-access-mr6br\") pod \"mariadb-operator-controller-manager-7b88bfc995-fvgtr\" (UID: \"c4b3f034-ce14-4081-a47c-feac32565388\") " pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.288701 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljk2m\" (UniqueName: \"kubernetes.io/projected/ab5bdb68-d0c3-436b-a7d5-36fe8be5bd90-kube-api-access-ljk2m\") pod \"manila-operator-controller-manager-598945d5b8-n7nvl\" (UID: \"ab5bdb68-d0c3-436b-a7d5-36fe8be5bd90\") " pod="openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.288749 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8c29l\" (UniqueName: \"kubernetes.io/projected/84b7e891-c710-431d-81e3-3d0fef0bf08e-kube-api-access-8c29l\") pod \"keystone-operator-controller-manager-568985c78-wk9gv\" (UID: \"84b7e891-c710-431d-81e3-3d0fef0bf08e\") " pod="openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.313771 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz"]
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.329615 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smtjc\" (UniqueName: \"kubernetes.io/projected/aa22d9f9-f865-495a-a0f0-a8aa424051aa-kube-api-access-smtjc\") pod \"nova-operator-controller-manager-5fbbf8b6cc-2qtrb\" (UID: \"aa22d9f9-f865-495a-a0f0-a8aa424051aa\") " pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.335313 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljk2m\" (UniqueName: \"kubernetes.io/projected/ab5bdb68-d0c3-436b-a7d5-36fe8be5bd90-kube-api-access-ljk2m\") pod \"manila-operator-controller-manager-598945d5b8-n7nvl\" (UID: \"ab5bdb68-d0c3-436b-a7d5-36fe8be5bd90\") " pod="openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.335321 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mr6br\" (UniqueName: \"kubernetes.io/projected/c4b3f034-ce14-4081-a47c-feac32565388-kube-api-access-mr6br\") pod \"mariadb-operator-controller-manager-7b88bfc995-fvgtr\" (UID: \"c4b3f034-ce14-4081-a47c-feac32565388\") " pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.343588 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8"]
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.343643 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8c29l\" (UniqueName: \"kubernetes.io/projected/84b7e891-c710-431d-81e3-3d0fef0bf08e-kube-api-access-8c29l\") pod \"keystone-operator-controller-manager-568985c78-wk9gv\" (UID: \"84b7e891-c710-431d-81e3-3d0fef0bf08e\") " pod="openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.347468 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw"]
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.348040 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.348775 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.353818 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-zwg7f"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.354764 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-x2ljf"
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.379348 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-9b6f8f78c-8j9cf"]
Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.382154 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8j9cf" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.387162 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-cj2mb" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.390947 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnszh\" (UniqueName: \"kubernetes.io/projected/ac3384d0-7a86-4e80-94a7-e0ff9bd32143-kube-api-access-xnszh\") pod \"octavia-operator-controller-manager-68c649d9d-p4tsz\" (UID: \"ac3384d0-7a86-4e80-94a7-e0ff9bd32143\") " pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.391131 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvm2l\" (UniqueName: \"kubernetes.io/projected/dee3cccb-8251-41b8-82ec-a696001f803d-kube-api-access-bvm2l\") pod \"neutron-operator-controller-manager-7cd87b778f-trrvg\" (UID: \"dee3cccb-8251-41b8-82ec-a696001f803d\") " pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.408914 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-9b6f8f78c-8j9cf"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.411018 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.416946 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.425704 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.426916 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.428334 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnszh\" (UniqueName: \"kubernetes.io/projected/ac3384d0-7a86-4e80-94a7-e0ff9bd32143-kube-api-access-xnszh\") pod \"octavia-operator-controller-manager-68c649d9d-p4tsz\" (UID: \"ac3384d0-7a86-4e80-94a7-e0ff9bd32143\") " pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.428756 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvm2l\" (UniqueName: \"kubernetes.io/projected/dee3cccb-8251-41b8-82ec-a696001f803d-kube-api-access-bvm2l\") pod \"neutron-operator-controller-manager-7cd87b778f-trrvg\" (UID: \"dee3cccb-8251-41b8-82ec-a696001f803d\") " pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.432481 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-pp76s" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.432675 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.438547 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.439461 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.440019 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.443279 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-svsw4" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.451267 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.452311 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.454472 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-49wt6" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.457839 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.485925 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-hfxg8" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.485969 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.493377 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.493709 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwxhm\" (UniqueName: \"kubernetes.io/projected/5965fe2f-f268-418d-b039-682eb20f87ea-kube-api-access-zwxhm\") pod \"swift-operator-controller-manager-bb586bbf4-75hnw\" (UID: \"5965fe2f-f268-418d-b039-682eb20f87ea\") " pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.493798 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dz849\" (UniqueName: \"kubernetes.io/projected/9224f0b2-2621-43c8-b061-66c826994814-kube-api-access-dz849\") pod \"placement-operator-controller-manager-9b6f8f78c-8j9cf\" (UID: \"9224f0b2-2621-43c8-b061-66c826994814\") " pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8j9cf" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.493839 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxnqn\" (UniqueName: \"kubernetes.io/projected/e03b4a44-b8b3-46db-a760-cb3f43f83bea-kube-api-access-rxnqn\") pod \"ovn-operator-controller-manager-bf6d4f946-mrws8\" (UID: \"e03b4a44-b8b3-46db-a760-cb3f43f83bea\") " pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.521102 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.522234 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.536850 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-mt42d" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.555080 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.573180 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.573545 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.606029 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92gv2\" (UniqueName: \"kubernetes.io/projected/299d2ab3-3f1b-4464-ab11-22aec9d915dd-kube-api-access-92gv2\") pod \"telemetry-operator-controller-manager-68d988df55-4xm5m\" (UID: \"299d2ab3-3f1b-4464-ab11-22aec9d915dd\") " pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.606092 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert\") pod \"openstack-baremetal-operator-controller-manager-596c464d775rs5r\" (UID: \"4743ff0b-8d16-4ee3-beb9-091a85bc7182\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.606172 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwxhm\" (UniqueName: \"kubernetes.io/projected/5965fe2f-f268-418d-b039-682eb20f87ea-kube-api-access-zwxhm\") pod \"swift-operator-controller-manager-bb586bbf4-75hnw\" (UID: \"5965fe2f-f268-418d-b039-682eb20f87ea\") " pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.606245 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfdnd\" (UniqueName: \"kubernetes.io/projected/4743ff0b-8d16-4ee3-beb9-091a85bc7182-kube-api-access-qfdnd\") pod \"openstack-baremetal-operator-controller-manager-596c464d775rs5r\" (UID: \"4743ff0b-8d16-4ee3-beb9-091a85bc7182\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.606290 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dz849\" (UniqueName: \"kubernetes.io/projected/9224f0b2-2621-43c8-b061-66c826994814-kube-api-access-dz849\") pod \"placement-operator-controller-manager-9b6f8f78c-8j9cf\" (UID: \"9224f0b2-2621-43c8-b061-66c826994814\") " pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8j9cf" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.606328 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxnqn\" (UniqueName: \"kubernetes.io/projected/e03b4a44-b8b3-46db-a760-cb3f43f83bea-kube-api-access-rxnqn\") pod \"ovn-operator-controller-manager-bf6d4f946-mrws8\" (UID: \"e03b4a44-b8b3-46db-a760-cb3f43f83bea\") " pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.606434 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjxdk\" (UniqueName: \"kubernetes.io/projected/72bc4794-4890-40dc-9d78-6a02f2983ddf-kube-api-access-cjxdk\") pod \"test-operator-controller-manager-6c866cfdcb-j9dht\" (UID: \"72bc4794-4890-40dc-9d78-6a02f2983ddf\") " pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.616319 4910 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.616900 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.620658 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.622927 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-25mrr" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.623485 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.623720 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.624587 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.631674 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwxhm\" (UniqueName: \"kubernetes.io/projected/5965fe2f-f268-418d-b039-682eb20f87ea-kube-api-access-zwxhm\") pod \"swift-operator-controller-manager-bb586bbf4-75hnw\" (UID: \"5965fe2f-f268-418d-b039-682eb20f87ea\") " pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.633708 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxnqn\" (UniqueName: \"kubernetes.io/projected/e03b4a44-b8b3-46db-a760-cb3f43f83bea-kube-api-access-rxnqn\") pod \"ovn-operator-controller-manager-bf6d4f946-mrws8\" (UID: \"e03b4a44-b8b3-46db-a760-cb3f43f83bea\") " pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.642243 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.644979 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dz849\" (UniqueName: \"kubernetes.io/projected/9224f0b2-2621-43c8-b061-66c826994814-kube-api-access-dz849\") pod \"placement-operator-controller-manager-9b6f8f78c-8j9cf\" (UID: \"9224f0b2-2621-43c8-b061-66c826994814\") " pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8j9cf" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.648230 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.654502 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.657056 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5"] Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.658328 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-ctdlm" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.661394 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.698551 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.708855 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfdnd\" (UniqueName: \"kubernetes.io/projected/4743ff0b-8d16-4ee3-beb9-091a85bc7182-kube-api-access-qfdnd\") pod \"openstack-baremetal-operator-controller-manager-596c464d775rs5r\" (UID: \"4743ff0b-8d16-4ee3-beb9-091a85bc7182\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.708935 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert\") pod \"infra-operator-controller-manager-6d99759cf-89lvl\" (UID: \"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.708999 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjxdk\" (UniqueName: \"kubernetes.io/projected/72bc4794-4890-40dc-9d78-6a02f2983ddf-kube-api-access-cjxdk\") pod \"test-operator-controller-manager-6c866cfdcb-j9dht\" (UID: \"72bc4794-4890-40dc-9d78-6a02f2983ddf\") " pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.709045 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92gv2\" (UniqueName: \"kubernetes.io/projected/299d2ab3-3f1b-4464-ab11-22aec9d915dd-kube-api-access-92gv2\") pod \"telemetry-operator-controller-manager-68d988df55-4xm5m\" (UID: \"299d2ab3-3f1b-4464-ab11-22aec9d915dd\") " pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.709069 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert\") pod \"openstack-baremetal-operator-controller-manager-596c464d775rs5r\" (UID: \"4743ff0b-8d16-4ee3-beb9-091a85bc7182\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.709099 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l847z\" (UniqueName: \"kubernetes.io/projected/9ba478cb-baeb-4955-b84b-872aacf97065-kube-api-access-l847z\") pod 
\"watcher-operator-controller-manager-9dbdf6486-mjkf5\" (UID: \"9ba478cb-baeb-4955-b84b-872aacf97065\") " pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5" Jan 05 22:07:54 crc kubenswrapper[4910]: E0105 22:07:54.709326 4910 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 05 22:07:54 crc kubenswrapper[4910]: E0105 22:07:54.709394 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert podName:b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed nodeName:}" failed. No retries permitted until 2026-01-05 22:07:55.709369447 +0000 UTC m=+1007.286867117 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert") pod "infra-operator-controller-manager-6d99759cf-89lvl" (UID: "b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed") : secret "infra-operator-webhook-server-cert" not found Jan 05 22:07:54 crc kubenswrapper[4910]: E0105 22:07:54.710036 4910 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 05 22:07:54 crc kubenswrapper[4910]: E0105 22:07:54.710167 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert podName:4743ff0b-8d16-4ee3-beb9-091a85bc7182 nodeName:}" failed. No retries permitted until 2026-01-05 22:07:55.210137196 +0000 UTC m=+1006.787634866 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert") pod "openstack-baremetal-operator-controller-manager-596c464d775rs5r" (UID: "4743ff0b-8d16-4ee3-beb9-091a85bc7182") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.744677 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfdnd\" (UniqueName: \"kubernetes.io/projected/4743ff0b-8d16-4ee3-beb9-091a85bc7182-kube-api-access-qfdnd\") pod \"openstack-baremetal-operator-controller-manager-596c464d775rs5r\" (UID: \"4743ff0b-8d16-4ee3-beb9-091a85bc7182\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.751190 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjxdk\" (UniqueName: \"kubernetes.io/projected/72bc4794-4890-40dc-9d78-6a02f2983ddf-kube-api-access-cjxdk\") pod \"test-operator-controller-manager-6c866cfdcb-j9dht\" (UID: \"72bc4794-4890-40dc-9d78-6a02f2983ddf\") " pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.752268 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92gv2\" (UniqueName: \"kubernetes.io/projected/299d2ab3-3f1b-4464-ab11-22aec9d915dd-kube-api-access-92gv2\") pod \"telemetry-operator-controller-manager-68d988df55-4xm5m\" (UID: \"299d2ab3-3f1b-4464-ab11-22aec9d915dd\") " pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.811580 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l847z\" (UniqueName: 
\"kubernetes.io/projected/9ba478cb-baeb-4955-b84b-872aacf97065-kube-api-access-l847z\") pod \"watcher-operator-controller-manager-9dbdf6486-mjkf5\" (UID: \"9ba478cb-baeb-4955-b84b-872aacf97065\") " pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.811642 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.811665 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nffpn\" (UniqueName: \"kubernetes.io/projected/2c5927c5-767c-49f2-91f1-c46608c506ff-kube-api-access-nffpn\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.811715 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.811756 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bcp7\" (UniqueName: \"kubernetes.io/projected/31f503c0-3017-4c01-8594-7b6775a0f397-kube-api-access-4bcp7\") pod \"rabbitmq-cluster-operator-manager-668c99d594-cwjm5\" (UID: \"31f503c0-3017-4c01-8594-7b6775a0f397\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.818450 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.841995 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l847z\" (UniqueName: \"kubernetes.io/projected/9ba478cb-baeb-4955-b84b-872aacf97065-kube-api-access-l847z\") pod \"watcher-operator-controller-manager-9dbdf6486-mjkf5\" (UID: \"9ba478cb-baeb-4955-b84b-872aacf97065\") " pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.849993 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8j9cf" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.915217 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.915269 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nffpn\" (UniqueName: \"kubernetes.io/projected/2c5927c5-767c-49f2-91f1-c46608c506ff-kube-api-access-nffpn\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.915311 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.915353 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bcp7\" (UniqueName: \"kubernetes.io/projected/31f503c0-3017-4c01-8594-7b6775a0f397-kube-api-access-4bcp7\") pod \"rabbitmq-cluster-operator-manager-668c99d594-cwjm5\" (UID: \"31f503c0-3017-4c01-8594-7b6775a0f397\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5" Jan 05 22:07:54 crc kubenswrapper[4910]: E0105 22:07:54.915940 4910 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 05 22:07:54 crc kubenswrapper[4910]: E0105 22:07:54.915990 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs podName:2c5927c5-767c-49f2-91f1-c46608c506ff nodeName:}" failed. No retries permitted until 2026-01-05 22:07:55.415971386 +0000 UTC m=+1006.993469056 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs") pod "openstack-operator-controller-manager-555f86cbf8-l5n82" (UID: "2c5927c5-767c-49f2-91f1-c46608c506ff") : secret "metrics-server-cert" not found Jan 05 22:07:54 crc kubenswrapper[4910]: E0105 22:07:54.916907 4910 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 05 22:07:54 crc kubenswrapper[4910]: E0105 22:07:54.916939 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs podName:2c5927c5-767c-49f2-91f1-c46608c506ff nodeName:}" failed. No retries permitted until 2026-01-05 22:07:55.41693029 +0000 UTC m=+1006.994427960 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs") pod "openstack-operator-controller-manager-555f86cbf8-l5n82" (UID: "2c5927c5-767c-49f2-91f1-c46608c506ff") : secret "webhook-server-cert" not found Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.937090 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nffpn\" (UniqueName: \"kubernetes.io/projected/2c5927c5-767c-49f2-91f1-c46608c506ff-kube-api-access-nffpn\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.939393 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bcp7\" (UniqueName: \"kubernetes.io/projected/31f503c0-3017-4c01-8594-7b6775a0f397-kube-api-access-4bcp7\") pod \"rabbitmq-cluster-operator-manager-668c99d594-cwjm5\" (UID: \"31f503c0-3017-4c01-8594-7b6775a0f397\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.966703 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht" Jan 05 22:07:54 crc kubenswrapper[4910]: I0105 22:07:54.989512 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m" Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.004814 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5" Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.088094 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5" Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.091930 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-78979fc445-v4jmq"] Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.156197 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.221050 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert\") pod \"openstack-baremetal-operator-controller-manager-596c464d775rs5r\" (UID: \"4743ff0b-8d16-4ee3-beb9-091a85bc7182\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" Jan 05 22:07:55 crc kubenswrapper[4910]: E0105 22:07:55.221240 4910 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 05 22:07:55 crc kubenswrapper[4910]: E0105 22:07:55.221361 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert podName:4743ff0b-8d16-4ee3-beb9-091a85bc7182 nodeName:}" failed. No retries permitted until 2026-01-05 22:07:56.221332704 +0000 UTC m=+1007.798830374 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert") pod "openstack-baremetal-operator-controller-manager-596c464d775rs5r" (UID: "4743ff0b-8d16-4ee3-beb9-091a85bc7182") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.326356 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-v4jmq" event={"ID":"10a1f9a3-7d22-4e02-8b5b-4ae1374194cf","Type":"ContainerStarted","Data":"a45591597a103f15f4b59f0b2b97a26590c3a739d6a373b5a8ed89b912b36295"} Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.424719 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.424837 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:07:55 crc kubenswrapper[4910]: E0105 22:07:55.425071 4910 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 05 22:07:55 crc kubenswrapper[4910]: E0105 22:07:55.425154 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs podName:2c5927c5-767c-49f2-91f1-c46608c506ff nodeName:}" failed. No retries permitted until 2026-01-05 22:07:56.425133075 +0000 UTC m=+1008.002630745 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs") pod "openstack-operator-controller-manager-555f86cbf8-l5n82" (UID: "2c5927c5-767c-49f2-91f1-c46608c506ff") : secret "webhook-server-cert" not found Jan 05 22:07:55 crc kubenswrapper[4910]: E0105 22:07:55.425234 4910 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 05 22:07:55 crc kubenswrapper[4910]: E0105 22:07:55.425346 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs podName:2c5927c5-767c-49f2-91f1-c46608c506ff nodeName:}" failed. No retries permitted until 2026-01-05 22:07:56.425312739 +0000 UTC m=+1008.002810409 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs") pod "openstack-operator-controller-manager-555f86cbf8-l5n82" (UID: "2c5927c5-767c-49f2-91f1-c46608c506ff") : secret "metrics-server-cert" not found Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.519214 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs"] Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.526661 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n"] Jan 05 22:07:55 crc kubenswrapper[4910]: W0105 22:07:55.553671 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf80b86a_ae5b_4e42_b4c7_9fc033d4fd26.slice/crio-81c3f7380689180d66a7e25e6e632d58259ff405bfe0e3b230dc1d972369fcf8 WatchSource:0}: Error finding container 81c3f7380689180d66a7e25e6e632d58259ff405bfe0e3b230dc1d972369fcf8: Status 404 returned error can't find the container with id 81c3f7380689180d66a7e25e6e632d58259ff405bfe0e3b230dc1d972369fcf8 Jan 05 22:07:55 crc kubenswrapper[4910]: W0105 22:07:55.556351 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod69c2eea7_1ac0_42b2_b1b7_d4ffaba1a9b7.slice/crio-19315393a55d6e3f786c7d096045e864c74f93eb7b751d705754c52045831287 WatchSource:0}: Error finding container 19315393a55d6e3f786c7d096045e864c74f93eb7b751d705754c52045831287: Status 404 returned error can't find the container with id 19315393a55d6e3f786c7d096045e864c74f93eb7b751d705754c52045831287 Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.558777 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-658dd65b86-hjhdr"] Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.568689 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-66f8b87655-d6q5k"] Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.574876 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-f6f74d6db-t7xl6"] Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.744297 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert\") pod \"infra-operator-controller-manager-6d99759cf-89lvl\" (UID: \"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl" Jan 05 22:07:55 crc kubenswrapper[4910]: E0105 22:07:55.744527 4910 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 05 22:07:55 crc kubenswrapper[4910]: E0105 22:07:55.744654 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert podName:b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed nodeName:}" failed. No retries permitted until 2026-01-05 22:07:57.744619497 +0000 UTC m=+1009.322117177 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert") pod "infra-operator-controller-manager-6d99759cf-89lvl" (UID: "b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed") : secret "infra-operator-webhook-server-cert" not found Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.939501 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht"] Jan 05 22:07:55 crc kubenswrapper[4910]: W0105 22:07:55.950907 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72bc4794_4890_40dc_9d78_6a02f2983ddf.slice/crio-5f35e70782b25fa88df43ca65834de5dec5f60aff62d121ac01778b669b61016 WatchSource:0}: Error finding container 5f35e70782b25fa88df43ca65834de5dec5f60aff62d121ac01778b669b61016: Status 404 returned error can't find the container with id 5f35e70782b25fa88df43ca65834de5dec5f60aff62d121ac01778b669b61016 Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.968179 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-9b6f8f78c-8j9cf"] Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.975285 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr"] Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.981170 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw"] Jan 05 22:07:55 crc kubenswrapper[4910]: I0105 22:07:55.988191 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl"] Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.011191 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz"] Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.017101 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv"] Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.028293 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-f99f54bc8-hfxg8"] Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.040286 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5"] Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.046289 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb"] Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.052223 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg"] Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.059512 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m"] Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.067430 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8"] Jan 05 22:07:56 crc kubenswrapper[4910]: W0105 22:07:56.068928 4910 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab5bdb68_d0c3_436b_a7d5_36fe8be5bd90.slice/crio-cfb6d8e39fc58d5e7d183dc99be36c85f59963d7359294a28dbc388058531b2d WatchSource:0}: Error finding container cfb6d8e39fc58d5e7d183dc99be36c85f59963d7359294a28dbc388058531b2d: Status 404 returned error can't find the container with id cfb6d8e39fc58d5e7d183dc99be36c85f59963d7359294a28dbc388058531b2d Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.069706 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5"] Jan 05 22:07:56 crc kubenswrapper[4910]: W0105 22:07:56.086808 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod299d2ab3_3f1b_4464_ab11_22aec9d915dd.slice/crio-1bb6f78e8716d109209df44bb4e3753f42df1a4c4e0ccb067c44e38de9550b61 WatchSource:0}: Error finding container 1bb6f78e8716d109209df44bb4e3753f42df1a4c4e0ccb067c44e38de9550b61: Status 404 returned error can't find the container with id 1bb6f78e8716d109209df44bb4e3753f42df1a4c4e0ccb067c44e38de9550b61 Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.113318 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xnszh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
octavia-operator-controller-manager-68c649d9d-p4tsz_openstack-operators(ac3384d0-7a86-4e80-94a7-e0ff9bd32143): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.113344 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:f0ece9a81e4be3dbc1ff752a951970380546d8c0dea910953f862c219444b97a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-l847z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-9dbdf6486-mjkf5_openstack-operators(9ba478cb-baeb-4955-b84b-872aacf97065): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.113439 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bvm2l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-7cd87b778f-trrvg_openstack-operators(dee3cccb-8251-41b8-82ec-a696001f803d): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.113482 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:879d3d679b58ae84419b7907ad092ad4d24bcc9222ce621ce464fd0fea347b0c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8c29l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-568985c78-wk9gv_openstack-operators(84b7e891-c710-431d-81e3-3d0fef0bf08e): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.114471 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rxnqn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-bf6d4f946-mrws8_openstack-operators(e03b4a44-b8b3-46db-a760-cb3f43f83bea): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.114636 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-smtjc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-5fbbf8b6cc-2qtrb_openstack-operators(aa22d9f9-f865-495a-a0f0-a8aa424051aa): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.115077 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv" podUID="84b7e891-c710-431d-81e3-3d0fef0bf08e"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.115197 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz" podUID="ac3384d0-7a86-4e80-94a7-e0ff9bd32143"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.115230 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5" podUID="9ba478cb-baeb-4955-b84b-872aacf97065"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.115261 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg" podUID="dee3cccb-8251-41b8-82ec-a696001f803d"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.118054 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb" podUID="aa22d9f9-f865-495a-a0f0-a8aa424051aa"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.118227 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8" podUID="e03b4a44-b8b3-46db-a760-cb3f43f83bea"
Jan 05 22:07:56 crc kubenswrapper[4910]: W0105 22:07:56.131199 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31f503c0_3017_4c01_8594_7b6775a0f397.slice/crio-0b08614ff9420a5284870beae0f09e29b735e8d7194f8ae23228de1010f43a87 WatchSource:0}: Error finding container 0b08614ff9420a5284870beae0f09e29b735e8d7194f8ae23228de1010f43a87: Status 404 returned error can't find the container with id 0b08614ff9420a5284870beae0f09e29b735e8d7194f8ae23228de1010f43a87
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.139925 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4bcp7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-cwjm5_openstack-operators(31f503c0-3017-4c01-8594-7b6775a0f397): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.141593 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5" podUID="31f503c0-3017-4c01-8594-7b6775a0f397"
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.269394 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert\") pod \"openstack-baremetal-operator-controller-manager-596c464d775rs5r\" (UID: \"4743ff0b-8d16-4ee3-beb9-091a85bc7182\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.269609 4910 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.269755 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert podName:4743ff0b-8d16-4ee3-beb9-091a85bc7182 nodeName:}" failed. No retries permitted until 2026-01-05 22:07:58.269723116 +0000 UTC m=+1009.847220776 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert") pod "openstack-baremetal-operator-controller-manager-596c464d775rs5r" (UID: "4743ff0b-8d16-4ee3-beb9-091a85bc7182") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.336541 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv" event={"ID":"84b7e891-c710-431d-81e3-3d0fef0bf08e","Type":"ContainerStarted","Data":"0214d776832c0ab6dedce7eb9a77e8853576d13d700d2501f0494b547feab50f"}
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.338054 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:879d3d679b58ae84419b7907ad092ad4d24bcc9222ce621ce464fd0fea347b0c\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv" podUID="84b7e891-c710-431d-81e3-3d0fef0bf08e"
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.340963 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n" event={"ID":"c3362390-1824-422e-8a9b-dfcfc1098cfd","Type":"ContainerStarted","Data":"b6cd280818ef5f62355bee3585cf4c84a4221d65f5d045767565ddd404605814"}
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.343130 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-hfxg8" event={"ID":"6bfbc4ba-ffa7-42df-9123-945bbe818352","Type":"ContainerStarted","Data":"1c1f0c7555520b634793fdecfd3f19f9718ad49e45a3dd5838e59a7478cb9343"}
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.344585 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs" event={"ID":"98338ebd-fd6a-49de-a042-edb94e115570","Type":"ContainerStarted","Data":"a8cba13b9c63e1736cc722b585efd50c69a51f26c0a00bf679173aaf7f8dde41"}
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.345926 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8" event={"ID":"e03b4a44-b8b3-46db-a760-cb3f43f83bea","Type":"ContainerStarted","Data":"7a760d91af5572ce665259f60d7a0088ef6590da0476f6c4ace95602d08c6a63"}
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.347230 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8" podUID="e03b4a44-b8b3-46db-a760-cb3f43f83bea"
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.347573 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5" event={"ID":"31f503c0-3017-4c01-8594-7b6775a0f397","Type":"ContainerStarted","Data":"0b08614ff9420a5284870beae0f09e29b735e8d7194f8ae23228de1010f43a87"}
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.348474 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5" podUID="31f503c0-3017-4c01-8594-7b6775a0f397"
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.365430 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5" event={"ID":"9ba478cb-baeb-4955-b84b-872aacf97065","Type":"ContainerStarted","Data":"7b78f70df84c31b5d9fe9dc129de221c069da6f4180dfdd3f9247a564c40a820"}
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.367468 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:f0ece9a81e4be3dbc1ff752a951970380546d8c0dea910953f862c219444b97a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5" podUID="9ba478cb-baeb-4955-b84b-872aacf97065"
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.378794 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-658dd65b86-hjhdr" event={"ID":"69c2eea7-1ac0-42b2-b1b7-d4ffaba1a9b7","Type":"ContainerStarted","Data":"19315393a55d6e3f786c7d096045e864c74f93eb7b751d705754c52045831287"}
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.382262 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht" event={"ID":"72bc4794-4890-40dc-9d78-6a02f2983ddf","Type":"ContainerStarted","Data":"5f35e70782b25fa88df43ca65834de5dec5f60aff62d121ac01778b669b61016"}
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.405437 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m" event={"ID":"299d2ab3-3f1b-4464-ab11-22aec9d915dd","Type":"ContainerStarted","Data":"1bb6f78e8716d109209df44bb4e3753f42df1a4c4e0ccb067c44e38de9550b61"}
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.407396 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb" event={"ID":"aa22d9f9-f865-495a-a0f0-a8aa424051aa","Type":"ContainerStarted","Data":"ada6e9359de699fea7b6b666866e0fd0a7f64ef94b28842d3d4c9d0bac511270"}
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.409205 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670\\\"\"" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb" podUID="aa22d9f9-f865-495a-a0f0-a8aa424051aa"
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.412429 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-d6q5k" event={"ID":"af80b86a-ae5b-4e42-b4c7-9fc033d4fd26","Type":"ContainerStarted","Data":"81c3f7380689180d66a7e25e6e632d58259ff405bfe0e3b230dc1d972369fcf8"}
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.417291 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl" event={"ID":"ab5bdb68-d0c3-436b-a7d5-36fe8be5bd90","Type":"ContainerStarted","Data":"cfb6d8e39fc58d5e7d183dc99be36c85f59963d7359294a28dbc388058531b2d"}
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.422670 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr" event={"ID":"c4b3f034-ce14-4081-a47c-feac32565388","Type":"ContainerStarted","Data":"29b9844e81f0dac63ef3c38d0938a05a6b43b84295de3c412f71555c9715cfc4"}
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.433585 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg" event={"ID":"dee3cccb-8251-41b8-82ec-a696001f803d","Type":"ContainerStarted","Data":"1072cd38730d751af0f8445e926193e0b5568729549b893df5eab09fd72eb555"}
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.468957 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-t7xl6" event={"ID":"11188457-aabb-45d0-85d5-3ae1fc7a085f","Type":"ContainerStarted","Data":"ffff1e87c1c11096a9714b0635d0efb66c468db01e30571f4bfdcfcba07bc68b"}
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.470374 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg" podUID="dee3cccb-8251-41b8-82ec-a696001f803d"
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.470856 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw" event={"ID":"5965fe2f-f268-418d-b039-682eb20f87ea","Type":"ContainerStarted","Data":"a7c4a399c6e7320e5b52ca8ae72a7e3b37ee191fa1c447945b1d5304bce66b7d"}
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.474102 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz" event={"ID":"ac3384d0-7a86-4e80-94a7-e0ff9bd32143","Type":"ContainerStarted","Data":"8fbe552bbf1d9a95438422494346e9ffb66d2f59ef3c0cd23912252396e9bb49"}
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.475745 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz" podUID="ac3384d0-7a86-4e80-94a7-e0ff9bd32143"
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.476297 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8j9cf" event={"ID":"9224f0b2-2621-43c8-b061-66c826994814","Type":"ContainerStarted","Data":"2c956efd8fdd4ca5a2e72f59b51c1f618c56e88b79d9cffb3b8637f89e004297"}
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.477017 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82"
Jan 05 22:07:56 crc kubenswrapper[4910]: I0105 22:07:56.477111 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82"
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.477367 4910 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.477437 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs podName:2c5927c5-767c-49f2-91f1-c46608c506ff nodeName:}" failed. No retries permitted until 2026-01-05 22:07:58.477416911 +0000 UTC m=+1010.054914581 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs") pod "openstack-operator-controller-manager-555f86cbf8-l5n82" (UID: "2c5927c5-767c-49f2-91f1-c46608c506ff") : secret "webhook-server-cert" not found
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.477512 4910 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 05 22:07:56 crc kubenswrapper[4910]: E0105 22:07:56.477543 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs podName:2c5927c5-767c-49f2-91f1-c46608c506ff nodeName:}" failed. No retries permitted until 2026-01-05 22:07:58.477535524 +0000 UTC m=+1010.055033194 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs") pod "openstack-operator-controller-manager-555f86cbf8-l5n82" (UID: "2c5927c5-767c-49f2-91f1-c46608c506ff") : secret "metrics-server-cert" not found
Jan 05 22:07:57 crc kubenswrapper[4910]: E0105 22:07:57.493404 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5" podUID="31f503c0-3017-4c01-8594-7b6775a0f397"
Jan 05 22:07:57 crc kubenswrapper[4910]: E0105 22:07:57.498728 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:879d3d679b58ae84419b7907ad092ad4d24bcc9222ce621ce464fd0fea347b0c\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv" podUID="84b7e891-c710-431d-81e3-3d0fef0bf08e"
Jan 05 22:07:57 crc kubenswrapper[4910]: E0105 22:07:57.498835 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg" podUID="dee3cccb-8251-41b8-82ec-a696001f803d"
Jan 05 22:07:57 crc kubenswrapper[4910]: E0105 22:07:57.504100 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670\\\"\"" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb" podUID="aa22d9f9-f865-495a-a0f0-a8aa424051aa"
Jan 05 22:07:57 crc kubenswrapper[4910]: E0105 22:07:57.504195 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz" podUID="ac3384d0-7a86-4e80-94a7-e0ff9bd32143"
Jan 05 22:07:57 crc kubenswrapper[4910]: E0105 22:07:57.504260 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:f0ece9a81e4be3dbc1ff752a951970380546d8c0dea910953f862c219444b97a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5" podUID="9ba478cb-baeb-4955-b84b-872aacf97065"
Jan 05 22:07:57 crc kubenswrapper[4910]: E0105 22:07:57.504325 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8" podUID="e03b4a44-b8b3-46db-a760-cb3f43f83bea"
Jan 05 22:07:57 crc kubenswrapper[4910]: I0105 22:07:57.806639 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert\") pod \"infra-operator-controller-manager-6d99759cf-89lvl\" (UID: \"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl"
Jan 05 22:07:57 crc kubenswrapper[4910]: E0105 22:07:57.806910 4910 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Jan 05 22:07:57 crc kubenswrapper[4910]: E0105 22:07:57.806975 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert podName:b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed nodeName:}" failed. No retries permitted until 2026-01-05 22:08:01.806952731 +0000 UTC m=+1013.384450401 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert") pod "infra-operator-controller-manager-6d99759cf-89lvl" (UID: "b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed") : secret "infra-operator-webhook-server-cert" not found
Jan 05 22:07:58 crc kubenswrapper[4910]: I0105 22:07:58.318049 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert\") pod \"openstack-baremetal-operator-controller-manager-596c464d775rs5r\" (UID: \"4743ff0b-8d16-4ee3-beb9-091a85bc7182\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r"
Jan 05 22:07:58 crc kubenswrapper[4910]: E0105 22:07:58.318531 4910 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 05 22:07:58 crc kubenswrapper[4910]: E0105 22:07:58.318696 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert podName:4743ff0b-8d16-4ee3-beb9-091a85bc7182 nodeName:}" failed. No retries permitted until 2026-01-05 22:08:02.318659002 +0000 UTC m=+1013.896156832 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert") pod "openstack-baremetal-operator-controller-manager-596c464d775rs5r" (UID: "4743ff0b-8d16-4ee3-beb9-091a85bc7182") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 05 22:07:58 crc kubenswrapper[4910]: I0105 22:07:58.523243 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82"
Jan 05 22:07:58 crc kubenswrapper[4910]: I0105 22:07:58.523352 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82"
Jan 05 22:07:58 crc kubenswrapper[4910]: E0105 22:07:58.523674 4910 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 05 22:07:58 crc kubenswrapper[4910]: E0105 22:07:58.523750 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs podName:2c5927c5-767c-49f2-91f1-c46608c506ff nodeName:}" failed. No retries permitted until 2026-01-05 22:08:02.523725264 +0000 UTC m=+1014.101222944 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs") pod "openstack-operator-controller-manager-555f86cbf8-l5n82" (UID: "2c5927c5-767c-49f2-91f1-c46608c506ff") : secret "webhook-server-cert" not found
Jan 05 22:07:58 crc kubenswrapper[4910]: E0105 22:07:58.524211 4910 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 05 22:07:58 crc kubenswrapper[4910]: E0105 22:07:58.524244 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs podName:2c5927c5-767c-49f2-91f1-c46608c506ff nodeName:}" failed. No retries permitted until 2026-01-05 22:08:02.524233116 +0000 UTC m=+1014.101730786 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs") pod "openstack-operator-controller-manager-555f86cbf8-l5n82" (UID: "2c5927c5-767c-49f2-91f1-c46608c506ff") : secret "metrics-server-cert" not found
Jan 05 22:08:01 crc kubenswrapper[4910]: I0105 22:08:01.902411 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert\") pod \"infra-operator-controller-manager-6d99759cf-89lvl\" (UID: \"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl"
Jan 05 22:08:01 crc kubenswrapper[4910]: E0105 22:08:01.902608 4910 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Jan 05 22:08:01 crc kubenswrapper[4910]: E0105 22:08:01.903045 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert podName:b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed nodeName:}" failed. No retries permitted until 2026-01-05 22:08:09.903003798 +0000 UTC m=+1021.480501468 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert") pod "infra-operator-controller-manager-6d99759cf-89lvl" (UID: "b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed") : secret "infra-operator-webhook-server-cert" not found
Jan 05 22:08:02 crc kubenswrapper[4910]: I0105 22:08:02.409926 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert\") pod \"openstack-baremetal-operator-controller-manager-596c464d775rs5r\" (UID: \"4743ff0b-8d16-4ee3-beb9-091a85bc7182\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r"
Jan 05 22:08:02 crc kubenswrapper[4910]: E0105 22:08:02.410130 4910 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 05 22:08:02 crc kubenswrapper[4910]: E0105 22:08:02.410224 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert podName:4743ff0b-8d16-4ee3-beb9-091a85bc7182 nodeName:}" failed. No retries permitted until 2026-01-05 22:08:10.410202188 +0000 UTC m=+1021.987699858 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert") pod "openstack-baremetal-operator-controller-manager-596c464d775rs5r" (UID: "4743ff0b-8d16-4ee3-beb9-091a85bc7182") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 05 22:08:02 crc kubenswrapper[4910]: I0105 22:08:02.614044 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82"
Jan 05 22:08:02 crc kubenswrapper[4910]: I0105 22:08:02.614429 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82"
Jan 05 22:08:02 crc kubenswrapper[4910]: E0105 22:08:02.614291 4910 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 05 22:08:02 crc kubenswrapper[4910]: E0105 22:08:02.614702 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs podName:2c5927c5-767c-49f2-91f1-c46608c506ff nodeName:}" failed. No retries permitted until 2026-01-05 22:08:10.614675346 +0000 UTC m=+1022.192173016 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs") pod "openstack-operator-controller-manager-555f86cbf8-l5n82" (UID: "2c5927c5-767c-49f2-91f1-c46608c506ff") : secret "metrics-server-cert" not found
Jan 05 22:08:02 crc kubenswrapper[4910]: E0105 22:08:02.614745 4910 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 05 22:08:02 crc kubenswrapper[4910]: E0105 22:08:02.614990 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs podName:2c5927c5-767c-49f2-91f1-c46608c506ff nodeName:}" failed. No retries permitted until 2026-01-05 22:08:10.614948552 +0000 UTC m=+1022.192446252 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs") pod "openstack-operator-controller-manager-555f86cbf8-l5n82" (UID: "2c5927c5-767c-49f2-91f1-c46608c506ff") : secret "webhook-server-cert" not found
Jan 05 22:08:09 crc kubenswrapper[4910]: E0105 22:08:09.697031 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:df69e4193043476bc71d0e06ac8bc7bbd17f7b624d495aae6b7c5e5b40c9e1e7"
Jan 05 22:08:09 crc kubenswrapper[4910]: E0105 22:08:09.697652 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:df69e4193043476bc71d0e06ac8bc7bbd17f7b624d495aae6b7c5e5b40c9e1e7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zwxhm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-bb586bbf4-75hnw_openstack-operators(5965fe2f-f268-418d-b039-682eb20f87ea): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 05 22:08:09 crc kubenswrapper[4910]: E0105 22:08:09.699634 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw" podUID="5965fe2f-f268-418d-b039-682eb20f87ea"
Jan 05 22:08:09 crc kubenswrapper[4910]: I0105 22:08:09.959943 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert\") pod \"infra-operator-controller-manager-6d99759cf-89lvl\" (UID: \"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl"
Jan 05 22:08:09 crc kubenswrapper[4910]: E0105 22:08:09.960275 4910 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Jan 05 22:08:09 crc kubenswrapper[4910]: E0105 22:08:09.960623 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert podName:b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed nodeName:}" failed. No retries permitted until 2026-01-05 22:08:25.960585931 +0000 UTC m=+1037.538083621 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert") pod "infra-operator-controller-manager-6d99759cf-89lvl" (UID: "b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed") : secret "infra-operator-webhook-server-cert" not found
Jan 05 22:08:10 crc kubenswrapper[4910]: E0105 22:08:10.378795 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:19345236c6b6bd5ae772e336fa6065c6e94c8990d1bf05d30073ddb95ffffb4d"
Jan 05 22:08:10 crc kubenswrapper[4910]: E0105 22:08:10.379035 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:19345236c6b6bd5ae772e336fa6065c6e94c8990d1bf05d30073ddb95ffffb4d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5ttcf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-7b549fc966-67s8n_openstack-operators(c3362390-1824-422e-8a9b-dfcfc1098cfd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 05 22:08:10 crc kubenswrapper[4910]: E0105 22:08:10.380251 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n" podUID="c3362390-1824-422e-8a9b-dfcfc1098cfd"
Jan 05 22:08:10 crc kubenswrapper[4910]: I0105 22:08:10.470682 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert\") pod \"openstack-baremetal-operator-controller-manager-596c464d775rs5r\" (UID: \"4743ff0b-8d16-4ee3-beb9-091a85bc7182\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r"
Jan 05 22:08:10 crc kubenswrapper[4910]: E0105 22:08:10.470860 4910 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 05 22:08:10 crc kubenswrapper[4910]: E0105 22:08:10.470920 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert podName:4743ff0b-8d16-4ee3-beb9-091a85bc7182 nodeName:}" failed. No retries permitted until 2026-01-05 22:08:26.470900778 +0000 UTC m=+1038.048398448 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert") pod "openstack-baremetal-operator-controller-manager-596c464d775rs5r" (UID: "4743ff0b-8d16-4ee3-beb9-091a85bc7182") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 05 22:08:10 crc kubenswrapper[4910]: E0105 22:08:10.652374 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:df69e4193043476bc71d0e06ac8bc7bbd17f7b624d495aae6b7c5e5b40c9e1e7\\\"\"" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw" podUID="5965fe2f-f268-418d-b039-682eb20f87ea"
Jan 05 22:08:10 crc kubenswrapper[4910]: E0105 22:08:10.653307 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:19345236c6b6bd5ae772e336fa6065c6e94c8990d1bf05d30073ddb95ffffb4d\\\"\"" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n" podUID="c3362390-1824-422e-8a9b-dfcfc1098cfd"
Jan 05 22:08:10 crc kubenswrapper[4910]: I0105 22:08:10.673405 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82"
Jan 05 22:08:10 crc kubenswrapper[4910]: I0105 22:08:10.673769 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82"
Jan 05 22:08:10 crc kubenswrapper[4910]: E0105 22:08:10.674057 4910 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 05 22:08:10 crc kubenswrapper[4910]: E0105 22:08:10.674288 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs podName:2c5927c5-767c-49f2-91f1-c46608c506ff nodeName:}" failed. No retries permitted until 2026-01-05 22:08:26.674266439 +0000 UTC m=+1038.251764109 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs") pod "openstack-operator-controller-manager-555f86cbf8-l5n82" (UID: "2c5927c5-767c-49f2-91f1-c46608c506ff") : secret "webhook-server-cert" not found
Jan 05 22:08:10 crc kubenswrapper[4910]: E0105 22:08:10.674061 4910 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 05 22:08:10 crc kubenswrapper[4910]: E0105 22:08:10.675340 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs podName:2c5927c5-767c-49f2-91f1-c46608c506ff nodeName:}" failed. No retries permitted until 2026-01-05 22:08:26.675326964 +0000 UTC m=+1038.252824644 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs") pod "openstack-operator-controller-manager-555f86cbf8-l5n82" (UID: "2c5927c5-767c-49f2-91f1-c46608c506ff") : secret "metrics-server-cert" not found
Jan 05 22:08:10 crc kubenswrapper[4910]: I0105 22:08:10.952108 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 05 22:08:10 crc kubenswrapper[4910]: I0105 22:08:10.952199 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 05 22:08:11 crc kubenswrapper[4910]: E0105 22:08:11.102775 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:c846ab4a49272557884db6b976f979e6b9dce1aa73e5eb7872b4472f44602a1c"
Jan 05 22:08:11 crc kubenswrapper[4910]: E0105 22:08:11.103027 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:c846ab4a49272557884db6b976f979e6b9dce1aa73e5eb7872b4472f44602a1c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ljk2m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-598945d5b8-n7nvl_openstack-operators(ab5bdb68-d0c3-436b-a7d5-36fe8be5bd90): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 05 22:08:11 crc kubenswrapper[4910]: E0105 22:08:11.104272 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl" podUID="ab5bdb68-d0c3-436b-a7d5-36fe8be5bd90"
Jan 05 22:08:11 crc kubenswrapper[4910]: E0105 22:08:11.660563 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:c846ab4a49272557884db6b976f979e6b9dce1aa73e5eb7872b4472f44602a1c\\\"\"" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl" podUID="ab5bdb68-d0c3-436b-a7d5-36fe8be5bd90"
Jan 05 22:08:11 crc kubenswrapper[4910]: E0105 22:08:11.894437 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:b7111c690e8fda3cb0c5969bcfa68308907fd0cf05f73ecdcb9ac1423aa7bba3"
Jan 05 22:08:11 crc kubenswrapper[4910]: E0105 22:08:11.894644 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:b7111c690e8fda3cb0c5969bcfa68308907fd0cf05f73ecdcb9ac1423aa7bba3,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-whdjh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-7f5ddd8d7b-ht2cs_openstack-operators(98338ebd-fd6a-49de-a042-edb94e115570): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 05 22:08:11 crc kubenswrapper[4910]: E0105 22:08:11.896104 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs" podUID="98338ebd-fd6a-49de-a042-edb94e115570"
Jan 05 22:08:12 crc kubenswrapper[4910]: E0105 22:08:12.665861 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:b7111c690e8fda3cb0c5969bcfa68308907fd0cf05f73ecdcb9ac1423aa7bba3\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs" podUID="98338ebd-fd6a-49de-a042-edb94e115570"
Jan 05 22:08:12 crc kubenswrapper[4910]: E0105 22:08:12.740181 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/telemetry-operator@sha256:3c1b2858c64110448d801905fbbf3ffe7f78d264cc46ab12ab2d724842dba309"
Jan 05 22:08:12 crc kubenswrapper[4910]: E0105 22:08:12.740827 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:3c1b2858c64110448d801905fbbf3ffe7f78d264cc46ab12ab2d724842dba309,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-92gv2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-68d988df55-4xm5m_openstack-operators(299d2ab3-3f1b-4464-ab11-22aec9d915dd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 05 22:08:12 crc kubenswrapper[4910]: E0105 22:08:12.742081 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m" podUID="299d2ab3-3f1b-4464-ab11-22aec9d915dd"
Jan 05 22:08:13 crc kubenswrapper[4910]: E0105 22:08:13.682034 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:3c1b2858c64110448d801905fbbf3ffe7f78d264cc46ab12ab2d724842dba309\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m" podUID="299d2ab3-3f1b-4464-ab11-22aec9d915dd"
Jan 05 22:08:13 crc kubenswrapper[4910]: E0105 22:08:13.775687 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:c10647131e6fa6afeb11ea28e513b60f22dbfbb4ddc3727850b1fe5799890c41"
Jan 05 22:08:13 crc kubenswrapper[4910]: E0105 22:08:13.775975 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:c10647131e6fa6afeb11ea28e513b60f22dbfbb4ddc3727850b1fe5799890c41,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mr6br,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-7b88bfc995-fvgtr_openstack-operators(c4b3f034-ce14-4081-a47c-feac32565388): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 05 22:08:13 crc kubenswrapper[4910]: E0105 22:08:13.779199 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr" podUID="c4b3f034-ce14-4081-a47c-feac32565388" Jan 05 22:08:14 crc kubenswrapper[4910]: E0105 22:08:14.550559 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:4e3d234c1398039c2593611f7b0fd2a6b284cafb1563e6737876a265b9af42b6" Jan 05 22:08:14 crc kubenswrapper[4910]: E0105 22:08:14.551230 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:4e3d234c1398039c2593611f7b0fd2a6b284cafb1563e6737876a265b9af42b6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cjxdk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-6c866cfdcb-j9dht_openstack-operators(72bc4794-4890-40dc-9d78-6a02f2983ddf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 05 22:08:14 crc kubenswrapper[4910]: E0105 22:08:14.552472 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht" podUID="72bc4794-4890-40dc-9d78-6a02f2983ddf" Jan 05 22:08:14 crc kubenswrapper[4910]: E0105 22:08:14.691076 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:c10647131e6fa6afeb11ea28e513b60f22dbfbb4ddc3727850b1fe5799890c41\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr" podUID="c4b3f034-ce14-4081-a47c-feac32565388" Jan 05 22:08:14 crc kubenswrapper[4910]: E0105 22:08:14.691335 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:4e3d234c1398039c2593611f7b0fd2a6b284cafb1563e6737876a265b9af42b6\\\"\"" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht" 
podUID="72bc4794-4890-40dc-9d78-6a02f2983ddf" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.769514 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n" event={"ID":"c3362390-1824-422e-8a9b-dfcfc1098cfd","Type":"ContainerStarted","Data":"b59c6d45c842348d4f169165ec97f9a28ef5ecd2adb25000d8113cd8299bcd33"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.770426 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.770930 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-v4jmq" event={"ID":"10a1f9a3-7d22-4e02-8b5b-4ae1374194cf","Type":"ContainerStarted","Data":"6625a7a54541b0df65577b17029917cdfb46496a5f0d9d3baffc50d5b955a71a"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.771186 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-v4jmq" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.772185 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-hfxg8" event={"ID":"6bfbc4ba-ffa7-42df-9123-945bbe818352","Type":"ContainerStarted","Data":"c71111da2f818113531186783c651c44543c4d706db8ea877b2754cdc28aee77"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.772238 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-hfxg8" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.773205 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz" event={"ID":"ac3384d0-7a86-4e80-94a7-e0ff9bd32143","Type":"ContainerStarted","Data":"fd27fff7d1a52f6c25d562254e886166aed352aa37021e6c3158e6c069e145a1"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.773569 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.774648 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8j9cf" event={"ID":"9224f0b2-2621-43c8-b061-66c826994814","Type":"ContainerStarted","Data":"3c5fa416c0a255e9b11fc5050b3c2fafd33cbb0feb9f3582559ae78047357042"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.775025 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8j9cf" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.776154 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5" event={"ID":"9ba478cb-baeb-4955-b84b-872aacf97065","Type":"ContainerStarted","Data":"dd2191703982cd436cf6af4919e8c86fdcb14b7fa4781cc2c5fc1a82cff9d237"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.776510 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.777557 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/heat-operator-controller-manager-658dd65b86-hjhdr" event={"ID":"69c2eea7-1ac0-42b2-b1b7-d4ffaba1a9b7","Type":"ContainerStarted","Data":"84e4787e7b698a230af9ffbfe41744455643e73d64861a84424f186540526509"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.777989 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-658dd65b86-hjhdr" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.779053 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-d6q5k" event={"ID":"af80b86a-ae5b-4e42-b4c7-9fc033d4fd26","Type":"ContainerStarted","Data":"b7e1cb559ada05c1005651f80d2aa771f6b22852bad5d5e1ebd966c2094d6340"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.779438 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-d6q5k" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.780532 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg" event={"ID":"dee3cccb-8251-41b8-82ec-a696001f803d","Type":"ContainerStarted","Data":"3dc9d4c00495e6f6baf06a3e5cf1f0b1900e59caefd6c508d7e21c960b9a7288"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.780874 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.781845 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-t7xl6" event={"ID":"11188457-aabb-45d0-85d5-3ae1fc7a085f","Type":"ContainerStarted","Data":"2569d734d5d2ae8bec2012eb7510387bae69db6405c9ae7e73e4b2fabf1b203d"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.782177 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-t7xl6" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.783750 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5" event={"ID":"31f503c0-3017-4c01-8594-7b6775a0f397","Type":"ContainerStarted","Data":"be07b3154cb1a7d06467643d2c4c062cf80bc338c1aeb96ee6b0984a37e4f775"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.785723 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb" event={"ID":"aa22d9f9-f865-495a-a0f0-a8aa424051aa","Type":"ContainerStarted","Data":"788845d1ca1b93f6024f59b2846658f405305aebc1fa6ef8385203ec65d0f8d3"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.786168 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.792013 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv" event={"ID":"84b7e891-c710-431d-81e3-3d0fef0bf08e","Type":"ContainerStarted","Data":"d217d1b3542d32312e493ddb66351ef268cfe05ec18536c0f544a6a3808d1a77"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.792548 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.793562 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8" event={"ID":"e03b4a44-b8b3-46db-a760-cb3f43f83bea","Type":"ContainerStarted","Data":"c76f654f2f3a229f68c599bcd90be2aec9488becc207ee53ed883650ef5c748e"} Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.793890 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8" Jan 05 22:08:24 crc kubenswrapper[4910]: I0105 22:08:24.906656 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n" podStartSLOduration=3.700912341 podStartE2EDuration="31.906637821s" podCreationTimestamp="2026-01-05 22:07:53 +0000 UTC" firstStartedPulling="2026-01-05 22:07:55.554129141 +0000 UTC m=+1007.131626811" lastFinishedPulling="2026-01-05 22:08:23.759854621 +0000 UTC m=+1035.337352291" observedRunningTime="2026-01-05 22:08:24.860351562 +0000 UTC m=+1036.437849232" watchObservedRunningTime="2026-01-05 22:08:24.906637821 +0000 UTC m=+1036.484135491" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.007891 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8" podStartSLOduration=3.39413121 podStartE2EDuration="31.00787204s" podCreationTimestamp="2026-01-05 22:07:54 +0000 UTC" firstStartedPulling="2026-01-05 22:07:56.114361016 +0000 UTC m=+1007.691858686" lastFinishedPulling="2026-01-05 22:08:23.728101816 +0000 UTC m=+1035.305599516" observedRunningTime="2026-01-05 22:08:25.003421312 +0000 UTC m=+1036.580918982" watchObservedRunningTime="2026-01-05 22:08:25.00787204 +0000 UTC m=+1036.585369710" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.009269 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb" podStartSLOduration=4.395615286 podStartE2EDuration="32.009259024s" podCreationTimestamp="2026-01-05 22:07:53 +0000 UTC" firstStartedPulling="2026-01-05 22:07:56.11453337 +0000 UTC m=+1007.692031040" lastFinishedPulling="2026-01-05 22:08:23.728177068 +0000 UTC m=+1035.305674778" observedRunningTime="2026-01-05 22:08:24.907715977 +0000 UTC m=+1036.485213647" watchObservedRunningTime="2026-01-05 22:08:25.009259024 +0000 UTC m=+1036.586756694" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.046306 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-t7xl6" podStartSLOduration=4.527578935 podStartE2EDuration="32.046276997s" podCreationTimestamp="2026-01-05 22:07:53 +0000 UTC" firstStartedPulling="2026-01-05 22:07:55.568362818 +0000 UTC m=+1007.145860488" lastFinishedPulling="2026-01-05 22:08:23.08706088 +0000 UTC m=+1034.664558550" observedRunningTime="2026-01-05 22:08:25.028308218 +0000 UTC m=+1036.605805898" watchObservedRunningTime="2026-01-05 22:08:25.046276997 +0000 UTC m=+1036.623774667" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.121204 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz" podStartSLOduration=3.475803283 podStartE2EDuration="31.121177094s" 
podCreationTimestamp="2026-01-05 22:07:54 +0000 UTC" firstStartedPulling="2026-01-05 22:07:56.113031784 +0000 UTC m=+1007.690529454" lastFinishedPulling="2026-01-05 22:08:23.758405555 +0000 UTC m=+1035.335903265" observedRunningTime="2026-01-05 22:08:25.083434763 +0000 UTC m=+1036.660932443" watchObservedRunningTime="2026-01-05 22:08:25.121177094 +0000 UTC m=+1036.698674764" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.141792 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-d6q5k" podStartSLOduration=8.513363563 podStartE2EDuration="32.141763896s" podCreationTimestamp="2026-01-05 22:07:53 +0000 UTC" firstStartedPulling="2026-01-05 22:07:55.557654457 +0000 UTC m=+1007.135152127" lastFinishedPulling="2026-01-05 22:08:19.18605479 +0000 UTC m=+1030.763552460" observedRunningTime="2026-01-05 22:08:25.135515794 +0000 UTC m=+1036.713013464" watchObservedRunningTime="2026-01-05 22:08:25.141763896 +0000 UTC m=+1036.719261566" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.238759 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5" podStartSLOduration=3.62371809 podStartE2EDuration="31.238738841s" podCreationTimestamp="2026-01-05 22:07:54 +0000 UTC" firstStartedPulling="2026-01-05 22:07:56.113136396 +0000 UTC m=+1007.690634066" lastFinishedPulling="2026-01-05 22:08:23.728157147 +0000 UTC m=+1035.305654817" observedRunningTime="2026-01-05 22:08:25.236347733 +0000 UTC m=+1036.813845403" watchObservedRunningTime="2026-01-05 22:08:25.238738841 +0000 UTC m=+1036.816236511" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.240034 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8j9cf" podStartSLOduration=9.361087671 podStartE2EDuration="31.240029703s" podCreationTimestamp="2026-01-05 22:07:54 +0000 UTC" firstStartedPulling="2026-01-05 22:07:56.004940517 +0000 UTC m=+1007.582438177" lastFinishedPulling="2026-01-05 22:08:17.883882499 +0000 UTC m=+1029.461380209" observedRunningTime="2026-01-05 22:08:25.181505375 +0000 UTC m=+1036.759003045" watchObservedRunningTime="2026-01-05 22:08:25.240029703 +0000 UTC m=+1036.817527373" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.324703 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg" podStartSLOduration=4.707618637 podStartE2EDuration="32.324682038s" podCreationTimestamp="2026-01-05 22:07:53 +0000 UTC" firstStartedPulling="2026-01-05 22:07:56.11329885 +0000 UTC m=+1007.690796520" lastFinishedPulling="2026-01-05 22:08:23.730362241 +0000 UTC m=+1035.307859921" observedRunningTime="2026-01-05 22:08:25.322651218 +0000 UTC m=+1036.900148888" watchObservedRunningTime="2026-01-05 22:08:25.324682038 +0000 UTC m=+1036.902179708" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.328293 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-658dd65b86-hjhdr" podStartSLOduration=8.703125781 podStartE2EDuration="32.328283365s" podCreationTimestamp="2026-01-05 22:07:53 +0000 UTC" firstStartedPulling="2026-01-05 22:07:55.560877926 +0000 UTC m=+1007.138375596" lastFinishedPulling="2026-01-05 22:08:19.18603549 +0000 UTC m=+1030.763533180" observedRunningTime="2026-01-05 
22:08:25.283153205 +0000 UTC m=+1036.860650875" watchObservedRunningTime="2026-01-05 22:08:25.328283365 +0000 UTC m=+1036.905781035" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.386616 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-v4jmq" podStartSLOduration=8.358678841 podStartE2EDuration="32.386595618s" podCreationTimestamp="2026-01-05 22:07:53 +0000 UTC" firstStartedPulling="2026-01-05 22:07:55.155844567 +0000 UTC m=+1006.733342237" lastFinishedPulling="2026-01-05 22:08:19.183761344 +0000 UTC m=+1030.761259014" observedRunningTime="2026-01-05 22:08:25.384300712 +0000 UTC m=+1036.961798382" watchObservedRunningTime="2026-01-05 22:08:25.386595618 +0000 UTC m=+1036.964093278" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.458136 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv" podStartSLOduration=4.810582089 podStartE2EDuration="32.458097182s" podCreationTimestamp="2026-01-05 22:07:53 +0000 UTC" firstStartedPulling="2026-01-05 22:07:56.113401503 +0000 UTC m=+1007.690899173" lastFinishedPulling="2026-01-05 22:08:23.760916596 +0000 UTC m=+1035.338414266" observedRunningTime="2026-01-05 22:08:25.456946744 +0000 UTC m=+1037.034444414" watchObservedRunningTime="2026-01-05 22:08:25.458097182 +0000 UTC m=+1037.035594872" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.458822 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-hfxg8" podStartSLOduration=9.387968545 podStartE2EDuration="32.458814489s" podCreationTimestamp="2026-01-05 22:07:53 +0000 UTC" firstStartedPulling="2026-01-05 22:07:56.113010373 +0000 UTC m=+1007.690508043" lastFinishedPulling="2026-01-05 22:08:19.183856307 +0000 UTC m=+1030.761353987" observedRunningTime="2026-01-05 22:08:25.425688461 +0000 UTC m=+1037.003186131" watchObservedRunningTime="2026-01-05 22:08:25.458814489 +0000 UTC m=+1037.036312169" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.496826 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cwjm5" podStartSLOduration=3.906661952 podStartE2EDuration="31.496800866s" podCreationTimestamp="2026-01-05 22:07:54 +0000 UTC" firstStartedPulling="2026-01-05 22:07:56.139663643 +0000 UTC m=+1007.717161313" lastFinishedPulling="2026-01-05 22:08:23.729802557 +0000 UTC m=+1035.307300227" observedRunningTime="2026-01-05 22:08:25.492834649 +0000 UTC m=+1037.070332319" watchObservedRunningTime="2026-01-05 22:08:25.496800866 +0000 UTC m=+1037.074298536" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.804297 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw" event={"ID":"5965fe2f-f268-418d-b039-682eb20f87ea","Type":"ContainerStarted","Data":"a6d0b92da65797e5197b1069fcc66a7783cd4557dc7b075624fe0b619485033b"} Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.805889 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.840900 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw" 
podStartSLOduration=2.766651396 podStartE2EDuration="31.840878778s" podCreationTimestamp="2026-01-05 22:07:54 +0000 UTC" firstStartedPulling="2026-01-05 22:07:56.09074639 +0000 UTC m=+1007.668244060" lastFinishedPulling="2026-01-05 22:08:25.164973772 +0000 UTC m=+1036.742471442" observedRunningTime="2026-01-05 22:08:25.840656023 +0000 UTC m=+1037.418153693" watchObservedRunningTime="2026-01-05 22:08:25.840878778 +0000 UTC m=+1037.418376448" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.977355 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert\") pod \"infra-operator-controller-manager-6d99759cf-89lvl\" (UID: \"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.986020 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed-cert\") pod \"infra-operator-controller-manager-6d99759cf-89lvl\" (UID: \"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed\") " pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl" Jan 05 22:08:25 crc kubenswrapper[4910]: I0105 22:08:25.997811 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-bhz9k" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.006221 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.301655 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl"] Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.490579 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert\") pod \"openstack-baremetal-operator-controller-manager-596c464d775rs5r\" (UID: \"4743ff0b-8d16-4ee3-beb9-091a85bc7182\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.498173 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4743ff0b-8d16-4ee3-beb9-091a85bc7182-cert\") pod \"openstack-baremetal-operator-controller-manager-596c464d775rs5r\" (UID: \"4743ff0b-8d16-4ee3-beb9-091a85bc7182\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.693242 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.693314 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: 
\"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.697847 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-webhook-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.700497 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2c5927c5-767c-49f2-91f1-c46608c506ff-metrics-certs\") pod \"openstack-operator-controller-manager-555f86cbf8-l5n82\" (UID: \"2c5927c5-767c-49f2-91f1-c46608c506ff\") " pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.749156 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-pp76s" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.756469 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.835248 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m" event={"ID":"299d2ab3-3f1b-4464-ab11-22aec9d915dd","Type":"ContainerStarted","Data":"d4b24d9fd8a548666baa5f1de2f07faa96f2a9ec6dceb90ad3431714d3a62996"} Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.836607 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.840001 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl" event={"ID":"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed","Type":"ContainerStarted","Data":"d3ed15c80d8a350f05c1ed2b52dfd73dc54fffe098e6343f71208a256c7ad569"} Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.842909 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr" event={"ID":"c4b3f034-ce14-4081-a47c-feac32565388","Type":"ContainerStarted","Data":"0ce0a9db1d9cd410ab45624192dcc887d7819b288c24f227307e35121f00de21"} Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.843287 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.857166 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-25mrr" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.860197 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m" podStartSLOduration=2.748667987 podStartE2EDuration="32.86017789s" podCreationTimestamp="2026-01-05 22:07:54 +0000 UTC" firstStartedPulling="2026-01-05 22:07:56.107943449 +0000 UTC m=+1007.685441119" 
lastFinishedPulling="2026-01-05 22:08:26.219453352 +0000 UTC m=+1037.796951022" observedRunningTime="2026-01-05 22:08:26.856932631 +0000 UTC m=+1038.434430301" watchObservedRunningTime="2026-01-05 22:08:26.86017789 +0000 UTC m=+1038.437675570" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.863206 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:08:26 crc kubenswrapper[4910]: I0105 22:08:26.875252 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr" podStartSLOduration=3.7002213360000002 podStartE2EDuration="33.875234658s" podCreationTimestamp="2026-01-05 22:07:53 +0000 UTC" firstStartedPulling="2026-01-05 22:07:56.048276964 +0000 UTC m=+1007.625774634" lastFinishedPulling="2026-01-05 22:08:26.223290286 +0000 UTC m=+1037.800787956" observedRunningTime="2026-01-05 22:08:26.871399004 +0000 UTC m=+1038.448896674" watchObservedRunningTime="2026-01-05 22:08:26.875234658 +0000 UTC m=+1038.452732328" Jan 05 22:08:27 crc kubenswrapper[4910]: I0105 22:08:27.337785 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r"] Jan 05 22:08:27 crc kubenswrapper[4910]: I0105 22:08:27.470305 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82"] Jan 05 22:08:27 crc kubenswrapper[4910]: I0105 22:08:27.850238 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" event={"ID":"2c5927c5-767c-49f2-91f1-c46608c506ff","Type":"ContainerStarted","Data":"6e2429fb8d9997568de9d2d42e8e092da58e09ed13889d7ec00a2a134860907c"} Jan 05 22:08:27 crc kubenswrapper[4910]: I0105 22:08:27.851191 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" event={"ID":"4743ff0b-8d16-4ee3-beb9-091a85bc7182","Type":"ContainerStarted","Data":"59b4c68652b368f84a54d6d49dab0f006c6db90e715a2b8f644838bf82114d84"} Jan 05 22:08:33 crc kubenswrapper[4910]: I0105 22:08:33.999103 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-78979fc445-v4jmq" Jan 05 22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.081026 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-66f8b87655-d6q5k" Jan 05 22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.128274 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-7b549fc966-67s8n" Jan 05 22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.132876 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-658dd65b86-hjhdr" Jan 05 22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.289641 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-f6f74d6db-t7xl6" Jan 05 22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.488836 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-568985c78-wk9gv" Jan 05 
22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.489184 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-f99f54bc8-hfxg8" Jan 05 22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.577307 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-7b88bfc995-fvgtr" Jan 05 22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.621008 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-5fbbf8b6cc-2qtrb" Jan 05 22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.650971 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7cd87b778f-trrvg" Jan 05 22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.667291 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-68c649d9d-p4tsz" Jan 05 22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.709524 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-bf6d4f946-mrws8" Jan 05 22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.824777 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-bb586bbf4-75hnw" Jan 05 22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.855559 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-9b6f8f78c-8j9cf" Jan 05 22:08:34 crc kubenswrapper[4910]: I0105 22:08:34.992027 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-68d988df55-4xm5m" Jan 05 22:08:35 crc kubenswrapper[4910]: I0105 22:08:35.011955 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-9dbdf6486-mjkf5" Jan 05 22:08:36 crc kubenswrapper[4910]: I0105 22:08:36.925856 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" event={"ID":"2c5927c5-767c-49f2-91f1-c46608c506ff","Type":"ContainerStarted","Data":"afad76795b1fccebac5c0f290a80d7fdc95b2540ddae92ee14eb2492a343ca41"} Jan 05 22:08:37 crc kubenswrapper[4910]: I0105 22:08:37.934542 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:08:37 crc kubenswrapper[4910]: I0105 22:08:37.977067 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" podStartSLOduration=43.977044212 podStartE2EDuration="43.977044212s" podCreationTimestamp="2026-01-05 22:07:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:08:37.976572921 +0000 UTC m=+1049.554070641" watchObservedRunningTime="2026-01-05 22:08:37.977044212 +0000 UTC m=+1049.554541892" Jan 05 22:08:38 crc kubenswrapper[4910]: E0105 22:08:38.035617 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = writing blob: storing blob to file \"/var/tmp/container_images_storage2363789316/1\": 
happened during read: context canceled" image="quay.io/openstack-k8s-operators/infra-operator@sha256:0144c53f5c318a2a2a690f358f5574fd4c1bd580e75e738cea935f8df95e52a9" Jan 05 22:08:38 crc kubenswrapper[4910]: E0105 22:08:38.035879 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:0144c53f5c318a2a2a690f358f5574fd4c1bd580e75e738cea935f8df95e52a9,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kz5wd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-6d99759cf-89lvl_openstack-operators(b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed): ErrImagePull: rpc error: code = Canceled desc = writing blob: storing blob to file \"/var/tmp/container_images_storage2363789316/1\": happened during read: context canceled" logger="UnhandledError" Jan 05 22:08:38 crc kubenswrapper[4910]: E0105 22:08:38.037235 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = writing blob: storing blob to file \\\"/var/tmp/container_images_storage2363789316/1\\\": happened during read: context canceled\"" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl" podUID="b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed" Jan 05 22:08:39 crc kubenswrapper[4910]: E0105 22:08:39.651016 4910 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:0144c53f5c318a2a2a690f358f5574fd4c1bd580e75e738cea935f8df95e52a9\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl" podUID="b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed" Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.951914 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.952307 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.952361 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.953091 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a3fde00ac3c0f56cd1cc5b71d4cb8772dfa8207e8240fa1964337d79bc9075bf"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.953161 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://a3fde00ac3c0f56cd1cc5b71d4cb8772dfa8207e8240fa1964337d79bc9075bf" gracePeriod=600 Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.958202 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl" event={"ID":"ab5bdb68-d0c3-436b-a7d5-36fe8be5bd90","Type":"ContainerStarted","Data":"60360a8b61409a1502e85f01260faee47a04801ac3cdc2607110b674f35b3d06"} Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.958860 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl" Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.965675 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs" event={"ID":"98338ebd-fd6a-49de-a042-edb94e115570","Type":"ContainerStarted","Data":"afa0a634e594bfebfe2fb43f9f664b4afced4ca5beb1fa65a2505e83f6d2964e"} Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.965857 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs" Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.972798 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" 
event={"ID":"4743ff0b-8d16-4ee3-beb9-091a85bc7182","Type":"ContainerStarted","Data":"61903631e88c8250256490ca823184a6539a220b16c695feaaaf6462f75d97a0"} Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.973314 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.978274 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht" event={"ID":"72bc4794-4890-40dc-9d78-6a02f2983ddf","Type":"ContainerStarted","Data":"7c7697da38f1017f2658dbc13e07d9d15bd55ce2a8732f527b408f0a993229e3"} Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.979019 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht" Jan 05 22:08:40 crc kubenswrapper[4910]: I0105 22:08:40.986422 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl" podStartSLOduration=3.6211602579999997 podStartE2EDuration="47.986400763s" podCreationTimestamp="2026-01-05 22:07:53 +0000 UTC" firstStartedPulling="2026-01-05 22:07:56.090809992 +0000 UTC m=+1007.668307662" lastFinishedPulling="2026-01-05 22:08:40.456050457 +0000 UTC m=+1052.033548167" observedRunningTime="2026-01-05 22:08:40.981318219 +0000 UTC m=+1052.558815889" watchObservedRunningTime="2026-01-05 22:08:40.986400763 +0000 UTC m=+1052.563898443" Jan 05 22:08:41 crc kubenswrapper[4910]: I0105 22:08:41.006885 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs" podStartSLOduration=3.091148171 podStartE2EDuration="48.006721379s" podCreationTimestamp="2026-01-05 22:07:53 +0000 UTC" firstStartedPulling="2026-01-05 22:07:55.539817552 +0000 UTC m=+1007.117315222" lastFinishedPulling="2026-01-05 22:08:40.45539076 +0000 UTC m=+1052.032888430" observedRunningTime="2026-01-05 22:08:40.996905799 +0000 UTC m=+1052.574403489" watchObservedRunningTime="2026-01-05 22:08:41.006721379 +0000 UTC m=+1052.584219069" Jan 05 22:08:41 crc kubenswrapper[4910]: I0105 22:08:41.029281 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" podStartSLOduration=33.911487004 podStartE2EDuration="47.029258998s" podCreationTimestamp="2026-01-05 22:07:54 +0000 UTC" firstStartedPulling="2026-01-05 22:08:27.339785639 +0000 UTC m=+1038.917283309" lastFinishedPulling="2026-01-05 22:08:40.457557633 +0000 UTC m=+1052.035055303" observedRunningTime="2026-01-05 22:08:41.027532566 +0000 UTC m=+1052.605030236" watchObservedRunningTime="2026-01-05 22:08:41.029258998 +0000 UTC m=+1052.606756668" Jan 05 22:08:41 crc kubenswrapper[4910]: I0105 22:08:41.987179 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="a3fde00ac3c0f56cd1cc5b71d4cb8772dfa8207e8240fa1964337d79bc9075bf" exitCode=0 Jan 05 22:08:41 crc kubenswrapper[4910]: I0105 22:08:41.987290 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"a3fde00ac3c0f56cd1cc5b71d4cb8772dfa8207e8240fa1964337d79bc9075bf"} Jan 05 22:08:41 crc 
kubenswrapper[4910]: I0105 22:08:41.987991 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"657357707be4d8c777ee71d089740dbf0952f7ef5dc120116497297a0abbc7b5"} Jan 05 22:08:41 crc kubenswrapper[4910]: I0105 22:08:41.988030 4910 scope.go:117] "RemoveContainer" containerID="9e520e28b4c82f9c661ef0957d57afd6c58639ff887c3906d5a2d181968d14b2" Jan 05 22:08:42 crc kubenswrapper[4910]: I0105 22:08:42.011809 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht" podStartSLOduration=3.51381713 podStartE2EDuration="48.011783922s" podCreationTimestamp="2026-01-05 22:07:54 +0000 UTC" firstStartedPulling="2026-01-05 22:07:55.959988551 +0000 UTC m=+1007.537486221" lastFinishedPulling="2026-01-05 22:08:40.457955303 +0000 UTC m=+1052.035453013" observedRunningTime="2026-01-05 22:08:41.043324241 +0000 UTC m=+1052.620821911" watchObservedRunningTime="2026-01-05 22:08:42.011783922 +0000 UTC m=+1053.589281592" Jan 05 22:08:46 crc kubenswrapper[4910]: I0105 22:08:46.764497 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-596c464d775rs5r" Jan 05 22:08:46 crc kubenswrapper[4910]: I0105 22:08:46.886063 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-555f86cbf8-l5n82" Jan 05 22:08:54 crc kubenswrapper[4910]: I0105 22:08:54.233226 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-7f5ddd8d7b-ht2cs" Jan 05 22:08:54 crc kubenswrapper[4910]: I0105 22:08:54.445157 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-598945d5b8-n7nvl" Jan 05 22:08:54 crc kubenswrapper[4910]: I0105 22:08:54.972553 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-6c866cfdcb-j9dht" Jan 05 22:08:57 crc kubenswrapper[4910]: I0105 22:08:57.120581 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl" event={"ID":"b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed","Type":"ContainerStarted","Data":"c9f080ab441cba145b9fef97f541c3333e3b5904b53ba9b229ded279fb6a6e02"} Jan 05 22:08:57 crc kubenswrapper[4910]: I0105 22:08:57.121260 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl" Jan 05 22:08:57 crc kubenswrapper[4910]: I0105 22:08:57.144814 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl" podStartSLOduration=33.966925795 podStartE2EDuration="1m4.144786793s" podCreationTimestamp="2026-01-05 22:07:53 +0000 UTC" firstStartedPulling="2026-01-05 22:08:26.32511393 +0000 UTC m=+1037.902611600" lastFinishedPulling="2026-01-05 22:08:56.502974908 +0000 UTC m=+1068.080472598" observedRunningTime="2026-01-05 22:08:57.138898009 +0000 UTC m=+1068.716395699" watchObservedRunningTime="2026-01-05 22:08:57.144786793 +0000 UTC m=+1068.722284463" Jan 05 22:09:06 crc kubenswrapper[4910]: I0105 22:09:06.014752 4910 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-6d99759cf-89lvl" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.069456 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-r4h6d"] Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.071327 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-r4h6d" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.073958 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-bkmds" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.075481 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.075787 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.077780 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.083373 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-r4h6d"] Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.151418 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-9fzbj"] Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.153045 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.155668 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.165721 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-9fzbj"] Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.236671 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62d9p\" (UniqueName: \"kubernetes.io/projected/bd9e7646-7aae-4286-8283-f166b4d60b38-kube-api-access-62d9p\") pod \"dnsmasq-dns-84bb9d8bd9-r4h6d\" (UID: \"bd9e7646-7aae-4286-8283-f166b4d60b38\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-r4h6d" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.236729 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9e7646-7aae-4286-8283-f166b4d60b38-config\") pod \"dnsmasq-dns-84bb9d8bd9-r4h6d\" (UID: \"bd9e7646-7aae-4286-8283-f166b4d60b38\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-r4h6d" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.338695 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62d9p\" (UniqueName: \"kubernetes.io/projected/bd9e7646-7aae-4286-8283-f166b4d60b38-kube-api-access-62d9p\") pod \"dnsmasq-dns-84bb9d8bd9-r4h6d\" (UID: \"bd9e7646-7aae-4286-8283-f166b4d60b38\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-r4h6d" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.338755 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9e7646-7aae-4286-8283-f166b4d60b38-config\") pod \"dnsmasq-dns-84bb9d8bd9-r4h6d\" (UID: \"bd9e7646-7aae-4286-8283-f166b4d60b38\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-r4h6d" Jan 05 
22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.338808 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87dec2ec-979f-41e3-b5b4-909962331a47-dns-svc\") pod \"dnsmasq-dns-5f854695bc-9fzbj\" (UID: \"87dec2ec-979f-41e3-b5b4-909962331a47\") " pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.338838 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87dec2ec-979f-41e3-b5b4-909962331a47-config\") pod \"dnsmasq-dns-5f854695bc-9fzbj\" (UID: \"87dec2ec-979f-41e3-b5b4-909962331a47\") " pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.339136 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtfwn\" (UniqueName: \"kubernetes.io/projected/87dec2ec-979f-41e3-b5b4-909962331a47-kube-api-access-xtfwn\") pod \"dnsmasq-dns-5f854695bc-9fzbj\" (UID: \"87dec2ec-979f-41e3-b5b4-909962331a47\") " pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.339748 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9e7646-7aae-4286-8283-f166b4d60b38-config\") pod \"dnsmasq-dns-84bb9d8bd9-r4h6d\" (UID: \"bd9e7646-7aae-4286-8283-f166b4d60b38\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-r4h6d" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.360465 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62d9p\" (UniqueName: \"kubernetes.io/projected/bd9e7646-7aae-4286-8283-f166b4d60b38-kube-api-access-62d9p\") pod \"dnsmasq-dns-84bb9d8bd9-r4h6d\" (UID: \"bd9e7646-7aae-4286-8283-f166b4d60b38\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-r4h6d" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.395246 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-r4h6d" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.440654 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87dec2ec-979f-41e3-b5b4-909962331a47-dns-svc\") pod \"dnsmasq-dns-5f854695bc-9fzbj\" (UID: \"87dec2ec-979f-41e3-b5b4-909962331a47\") " pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.440727 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87dec2ec-979f-41e3-b5b4-909962331a47-config\") pod \"dnsmasq-dns-5f854695bc-9fzbj\" (UID: \"87dec2ec-979f-41e3-b5b4-909962331a47\") " pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.440801 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtfwn\" (UniqueName: \"kubernetes.io/projected/87dec2ec-979f-41e3-b5b4-909962331a47-kube-api-access-xtfwn\") pod \"dnsmasq-dns-5f854695bc-9fzbj\" (UID: \"87dec2ec-979f-41e3-b5b4-909962331a47\") " pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.442467 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87dec2ec-979f-41e3-b5b4-909962331a47-config\") pod \"dnsmasq-dns-5f854695bc-9fzbj\" (UID: \"87dec2ec-979f-41e3-b5b4-909962331a47\") " pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.442522 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87dec2ec-979f-41e3-b5b4-909962331a47-dns-svc\") pod \"dnsmasq-dns-5f854695bc-9fzbj\" (UID: \"87dec2ec-979f-41e3-b5b4-909962331a47\") " pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.459712 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtfwn\" (UniqueName: \"kubernetes.io/projected/87dec2ec-979f-41e3-b5b4-909962331a47-kube-api-access-xtfwn\") pod \"dnsmasq-dns-5f854695bc-9fzbj\" (UID: \"87dec2ec-979f-41e3-b5b4-909962331a47\") " pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.475998 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.815853 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-r4h6d"] Jan 05 22:09:20 crc kubenswrapper[4910]: I0105 22:09:20.927769 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-9fzbj"] Jan 05 22:09:20 crc kubenswrapper[4910]: W0105 22:09:20.935228 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod87dec2ec_979f_41e3_b5b4_909962331a47.slice/crio-d8fb2aecec539558ccb6453369bee54dd0e0bceda2cdc530081ca0f28cc0c2fa WatchSource:0}: Error finding container d8fb2aecec539558ccb6453369bee54dd0e0bceda2cdc530081ca0f28cc0c2fa: Status 404 returned error can't find the container with id d8fb2aecec539558ccb6453369bee54dd0e0bceda2cdc530081ca0f28cc0c2fa Jan 05 22:09:21 crc kubenswrapper[4910]: I0105 22:09:21.315385 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bb9d8bd9-r4h6d" event={"ID":"bd9e7646-7aae-4286-8283-f166b4d60b38","Type":"ContainerStarted","Data":"dd2cec7ab9e89079e41671792072c08803d8244fb8f8a391b216ad254ba4b94a"} Jan 05 22:09:21 crc kubenswrapper[4910]: I0105 22:09:21.317516 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" event={"ID":"87dec2ec-979f-41e3-b5b4-909962331a47","Type":"ContainerStarted","Data":"d8fb2aecec539558ccb6453369bee54dd0e0bceda2cdc530081ca0f28cc0c2fa"} Jan 05 22:09:22 crc kubenswrapper[4910]: I0105 22:09:22.911137 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-9fzbj"] Jan 05 22:09:22 crc kubenswrapper[4910]: I0105 22:09:22.935550 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-qqb6h"] Jan 05 22:09:22 crc kubenswrapper[4910]: I0105 22:09:22.937036 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" Jan 05 22:09:22 crc kubenswrapper[4910]: I0105 22:09:22.950439 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-qqb6h"] Jan 05 22:09:22 crc kubenswrapper[4910]: I0105 22:09:22.986137 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7bab51b8-290d-45bf-b81b-6fa629125e05-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-qqb6h\" (UID: \"7bab51b8-290d-45bf-b81b-6fa629125e05\") " pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" Jan 05 22:09:22 crc kubenswrapper[4910]: I0105 22:09:22.986401 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tr88j\" (UniqueName: \"kubernetes.io/projected/7bab51b8-290d-45bf-b81b-6fa629125e05-kube-api-access-tr88j\") pod \"dnsmasq-dns-744ffd65bc-qqb6h\" (UID: \"7bab51b8-290d-45bf-b81b-6fa629125e05\") " pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" Jan 05 22:09:22 crc kubenswrapper[4910]: I0105 22:09:22.986584 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7bab51b8-290d-45bf-b81b-6fa629125e05-config\") pod \"dnsmasq-dns-744ffd65bc-qqb6h\" (UID: \"7bab51b8-290d-45bf-b81b-6fa629125e05\") " pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.089135 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7bab51b8-290d-45bf-b81b-6fa629125e05-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-qqb6h\" (UID: \"7bab51b8-290d-45bf-b81b-6fa629125e05\") " pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.089245 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tr88j\" (UniqueName: \"kubernetes.io/projected/7bab51b8-290d-45bf-b81b-6fa629125e05-kube-api-access-tr88j\") pod \"dnsmasq-dns-744ffd65bc-qqb6h\" (UID: \"7bab51b8-290d-45bf-b81b-6fa629125e05\") " pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.089562 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7bab51b8-290d-45bf-b81b-6fa629125e05-config\") pod \"dnsmasq-dns-744ffd65bc-qqb6h\" (UID: \"7bab51b8-290d-45bf-b81b-6fa629125e05\") " pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.090727 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7bab51b8-290d-45bf-b81b-6fa629125e05-config\") pod \"dnsmasq-dns-744ffd65bc-qqb6h\" (UID: \"7bab51b8-290d-45bf-b81b-6fa629125e05\") " pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.090930 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7bab51b8-290d-45bf-b81b-6fa629125e05-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-qqb6h\" (UID: \"7bab51b8-290d-45bf-b81b-6fa629125e05\") " pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.121904 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tr88j\" (UniqueName: 
\"kubernetes.io/projected/7bab51b8-290d-45bf-b81b-6fa629125e05-kube-api-access-tr88j\") pod \"dnsmasq-dns-744ffd65bc-qqb6h\" (UID: \"7bab51b8-290d-45bf-b81b-6fa629125e05\") " pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.248611 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-r4h6d"] Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.264671 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.265484 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-jzdx6"] Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.268761 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.275160 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-jzdx6"] Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.293774 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/935e0e47-2ab3-473a-8567-c36883d62801-dns-svc\") pod \"dnsmasq-dns-95f5f6995-jzdx6\" (UID: \"935e0e47-2ab3-473a-8567-c36883d62801\") " pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.293825 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fmrn\" (UniqueName: \"kubernetes.io/projected/935e0e47-2ab3-473a-8567-c36883d62801-kube-api-access-8fmrn\") pod \"dnsmasq-dns-95f5f6995-jzdx6\" (UID: \"935e0e47-2ab3-473a-8567-c36883d62801\") " pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.293891 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/935e0e47-2ab3-473a-8567-c36883d62801-config\") pod \"dnsmasq-dns-95f5f6995-jzdx6\" (UID: \"935e0e47-2ab3-473a-8567-c36883d62801\") " pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.400826 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/935e0e47-2ab3-473a-8567-c36883d62801-dns-svc\") pod \"dnsmasq-dns-95f5f6995-jzdx6\" (UID: \"935e0e47-2ab3-473a-8567-c36883d62801\") " pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.401871 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fmrn\" (UniqueName: \"kubernetes.io/projected/935e0e47-2ab3-473a-8567-c36883d62801-kube-api-access-8fmrn\") pod \"dnsmasq-dns-95f5f6995-jzdx6\" (UID: \"935e0e47-2ab3-473a-8567-c36883d62801\") " pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.401955 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/935e0e47-2ab3-473a-8567-c36883d62801-dns-svc\") pod \"dnsmasq-dns-95f5f6995-jzdx6\" (UID: \"935e0e47-2ab3-473a-8567-c36883d62801\") " pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.402718 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/935e0e47-2ab3-473a-8567-c36883d62801-config\") pod \"dnsmasq-dns-95f5f6995-jzdx6\" (UID: \"935e0e47-2ab3-473a-8567-c36883d62801\") " pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.408207 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/935e0e47-2ab3-473a-8567-c36883d62801-config\") pod \"dnsmasq-dns-95f5f6995-jzdx6\" (UID: \"935e0e47-2ab3-473a-8567-c36883d62801\") " pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.422737 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fmrn\" (UniqueName: \"kubernetes.io/projected/935e0e47-2ab3-473a-8567-c36883d62801-kube-api-access-8fmrn\") pod \"dnsmasq-dns-95f5f6995-jzdx6\" (UID: \"935e0e47-2ab3-473a-8567-c36883d62801\") " pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.626357 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" Jan 05 22:09:23 crc kubenswrapper[4910]: I0105 22:09:23.826224 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-qqb6h"] Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.071407 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.072900 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.085630 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.085671 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.085907 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.085901 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.087985 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.086088 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xj9t8" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.088739 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.107809 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.113227 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-jzdx6"] Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.118866 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 
crc kubenswrapper[4910]: I0105 22:09:24.118915 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.118971 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.119005 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.119036 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.119068 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.119115 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7e2a3efd-2de7-493e-af91-900b224e5313-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.119165 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9p5p9\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-kube-api-access-9p5p9\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.119188 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.119214 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7e2a3efd-2de7-493e-af91-900b224e5313-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.119247 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: W0105 22:09:24.126564 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod935e0e47_2ab3_473a_8567_c36883d62801.slice/crio-8ab074d628521b49b924a9b92e79bb716c3bfdd6ec0c7e2a3da3595545a2619a WatchSource:0}: Error finding container 8ab074d628521b49b924a9b92e79bb716c3bfdd6ec0c7e2a3da3595545a2619a: Status 404 returned error can't find the container with id 8ab074d628521b49b924a9b92e79bb716c3bfdd6ec0c7e2a3da3595545a2619a Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.221570 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.221957 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.221993 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.222039 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7e2a3efd-2de7-493e-af91-900b224e5313-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.222076 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9p5p9\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-kube-api-access-9p5p9\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.222099 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.222142 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7e2a3efd-2de7-493e-af91-900b224e5313-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.222177 4910 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.222240 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.222267 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.222313 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.222664 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.223673 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.222171 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.229482 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.230051 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.231397 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.232668 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7e2a3efd-2de7-493e-af91-900b224e5313-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.238625 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.243395 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7e2a3efd-2de7-493e-af91-900b224e5313-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.251840 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.261563 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9p5p9\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-kube-api-access-9p5p9\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.271737 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") " pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.390950 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.392246 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.394941 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.396257 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.397335 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.397483 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.397809 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.402444 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.402716 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-hf4vk" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.403171 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.413381 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.423478 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" event={"ID":"7bab51b8-290d-45bf-b81b-6fa629125e05","Type":"ContainerStarted","Data":"722e0f67738f442021838569a17460289c33dc89e8c97df045a07b61d9baf45e"} Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.434017 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" event={"ID":"935e0e47-2ab3-473a-8567-c36883d62801","Type":"ContainerStarted","Data":"8ab074d628521b49b924a9b92e79bb716c3bfdd6ec0c7e2a3da3595545a2619a"} Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.526394 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b9cedfb5-8c45-434f-b04d-694bf6d600b8-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.526445 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.526470 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.526493 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.526519 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b9cedfb5-8c45-434f-b04d-694bf6d600b8-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.526539 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.526574 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.526597 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.526627 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.526647 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkqrn\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-kube-api-access-nkqrn\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.526688 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.627907 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b9cedfb5-8c45-434f-b04d-694bf6d600b8-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.627952 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.627989 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.628013 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.628046 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.628071 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkqrn\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-kube-api-access-nkqrn\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.630045 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.630161 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.630193 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.635256 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b9cedfb5-8c45-434f-b04d-694bf6d600b8-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.635569 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-confd\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.646255 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.646345 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b9cedfb5-8c45-434f-b04d-694bf6d600b8-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.646407 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.646436 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.646474 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.647438 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.648083 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.648287 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.650010 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: 
I0105 22:09:24.651111 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b9cedfb5-8c45-434f-b04d-694bf6d600b8-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.655305 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkqrn\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-kube-api-access-nkqrn\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.666026 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:24 crc kubenswrapper[4910]: I0105 22:09:24.778460 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.001327 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.646342 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.648097 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.650880 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-xlgfv" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.651069 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.652088 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.655472 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.661159 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.669360 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.771669 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57vm5\" (UniqueName: \"kubernetes.io/projected/2cb18efe-a80d-4657-921d-af4a18ae279d-kube-api-access-57vm5\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.771758 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.771784 
4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.771829 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cb18efe-a80d-4657-921d-af4a18ae279d-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.771872 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-kolla-config\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.771910 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2cb18efe-a80d-4657-921d-af4a18ae279d-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.771931 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-config-data-default\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.771982 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2cb18efe-a80d-4657-921d-af4a18ae279d-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.873213 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-kolla-config\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.873290 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2cb18efe-a80d-4657-921d-af4a18ae279d-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.873323 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-config-data-default\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.873440 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2cb18efe-a80d-4657-921d-af4a18ae279d-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.873469 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57vm5\" (UniqueName: \"kubernetes.io/projected/2cb18efe-a80d-4657-921d-af4a18ae279d-kube-api-access-57vm5\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.873494 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.873514 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.873539 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cb18efe-a80d-4657-921d-af4a18ae279d-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.875083 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-kolla-config\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.875381 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.875456 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2cb18efe-a80d-4657-921d-af4a18ae279d-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.876310 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-config-data-default\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.877382 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " 
pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.878242 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cb18efe-a80d-4657-921d-af4a18ae279d-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.888422 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2cb18efe-a80d-4657-921d-af4a18ae279d-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.899696 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57vm5\" (UniqueName: \"kubernetes.io/projected/2cb18efe-a80d-4657-921d-af4a18ae279d-kube-api-access-57vm5\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.927663 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-0\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") " pod="openstack/openstack-galera-0" Jan 05 22:09:25 crc kubenswrapper[4910]: I0105 22:09:25.975397 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 05 22:09:26 crc kubenswrapper[4910]: I0105 22:09:26.930321 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 05 22:09:26 crc kubenswrapper[4910]: I0105 22:09:26.931590 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:26 crc kubenswrapper[4910]: I0105 22:09:26.935079 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-6b5kw" Jan 05 22:09:26 crc kubenswrapper[4910]: I0105 22:09:26.935139 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 05 22:09:26 crc kubenswrapper[4910]: I0105 22:09:26.935079 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 05 22:09:26 crc kubenswrapper[4910]: I0105 22:09:26.936304 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 05 22:09:26 crc kubenswrapper[4910]: I0105 22:09:26.943932 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.058157 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.059944 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.063759 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-xbx74" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.063885 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.066958 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.074644 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.103782 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.103841 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.103869 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x49n4\" (UniqueName: \"kubernetes.io/projected/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-kube-api-access-x49n4\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.103904 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.103939 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.103976 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.104006 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 
crc kubenswrapper[4910]: I0105 22:09:27.104048 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.208619 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.208687 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.208734 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9m2j8\" (UniqueName: \"kubernetes.io/projected/39608078-4c49-4ca6-b9d4-6cdd37d89f91-kube-api-access-9m2j8\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.208780 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.208802 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39608078-4c49-4ca6-b9d4-6cdd37d89f91-combined-ca-bundle\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.208847 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.208878 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/39608078-4c49-4ca6-b9d4-6cdd37d89f91-memcached-tls-certs\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.208917 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39608078-4c49-4ca6-b9d4-6cdd37d89f91-config-data\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.208951 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.208990 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x49n4\" (UniqueName: \"kubernetes.io/projected/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-kube-api-access-x49n4\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.209493 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/39608078-4c49-4ca6-b9d4-6cdd37d89f91-kolla-config\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.209570 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.209653 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.209917 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.210839 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.210874 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.211605 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.212904 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: 
\"kubernetes.io/empty-dir/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.218384 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.221874 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.228405 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x49n4\" (UniqueName: \"kubernetes.io/projected/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-kube-api-access-x49n4\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.239305 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") " pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.261550 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.311282 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/39608078-4c49-4ca6-b9d4-6cdd37d89f91-kolla-config\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.311424 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9m2j8\" (UniqueName: \"kubernetes.io/projected/39608078-4c49-4ca6-b9d4-6cdd37d89f91-kube-api-access-9m2j8\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.311459 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39608078-4c49-4ca6-b9d4-6cdd37d89f91-combined-ca-bundle\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.311492 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/39608078-4c49-4ca6-b9d4-6cdd37d89f91-memcached-tls-certs\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.311518 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39608078-4c49-4ca6-b9d4-6cdd37d89f91-config-data\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.312318 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39608078-4c49-4ca6-b9d4-6cdd37d89f91-config-data\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.312819 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/39608078-4c49-4ca6-b9d4-6cdd37d89f91-kolla-config\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.316171 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39608078-4c49-4ca6-b9d4-6cdd37d89f91-combined-ca-bundle\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.319247 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/39608078-4c49-4ca6-b9d4-6cdd37d89f91-memcached-tls-certs\") pod \"memcached-0\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.332731 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9m2j8\" (UniqueName: \"kubernetes.io/projected/39608078-4c49-4ca6-b9d4-6cdd37d89f91-kube-api-access-9m2j8\") pod \"memcached-0\" (UID: 
\"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") " pod="openstack/memcached-0" Jan 05 22:09:27 crc kubenswrapper[4910]: I0105 22:09:27.380373 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 05 22:09:28 crc kubenswrapper[4910]: W0105 22:09:28.067698 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7e2a3efd_2de7_493e_af91_900b224e5313.slice/crio-257a995f3b502011a7d5689ff58613770560f7c76b1e31d51c2505cdaa144b93 WatchSource:0}: Error finding container 257a995f3b502011a7d5689ff58613770560f7c76b1e31d51c2505cdaa144b93: Status 404 returned error can't find the container with id 257a995f3b502011a7d5689ff58613770560f7c76b1e31d51c2505cdaa144b93 Jan 05 22:09:28 crc kubenswrapper[4910]: I0105 22:09:28.468441 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7e2a3efd-2de7-493e-af91-900b224e5313","Type":"ContainerStarted","Data":"257a995f3b502011a7d5689ff58613770560f7c76b1e31d51c2505cdaa144b93"} Jan 05 22:09:28 crc kubenswrapper[4910]: I0105 22:09:28.872091 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 22:09:28 crc kubenswrapper[4910]: I0105 22:09:28.873456 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 05 22:09:28 crc kubenswrapper[4910]: I0105 22:09:28.877916 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-zkd8m" Jan 05 22:09:28 crc kubenswrapper[4910]: I0105 22:09:28.881623 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 22:09:29 crc kubenswrapper[4910]: I0105 22:09:29.044715 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4ppb\" (UniqueName: \"kubernetes.io/projected/db98b242-8ce3-4bc6-b04a-e22403612899-kube-api-access-v4ppb\") pod \"kube-state-metrics-0\" (UID: \"db98b242-8ce3-4bc6-b04a-e22403612899\") " pod="openstack/kube-state-metrics-0" Jan 05 22:09:29 crc kubenswrapper[4910]: I0105 22:09:29.149356 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4ppb\" (UniqueName: \"kubernetes.io/projected/db98b242-8ce3-4bc6-b04a-e22403612899-kube-api-access-v4ppb\") pod \"kube-state-metrics-0\" (UID: \"db98b242-8ce3-4bc6-b04a-e22403612899\") " pod="openstack/kube-state-metrics-0" Jan 05 22:09:29 crc kubenswrapper[4910]: I0105 22:09:29.182077 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4ppb\" (UniqueName: \"kubernetes.io/projected/db98b242-8ce3-4bc6-b04a-e22403612899-kube-api-access-v4ppb\") pod \"kube-state-metrics-0\" (UID: \"db98b242-8ce3-4bc6-b04a-e22403612899\") " pod="openstack/kube-state-metrics-0" Jan 05 22:09:29 crc kubenswrapper[4910]: I0105 22:09:29.236319 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.089670 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-cfp97"] Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.091347 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.094023 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.094288 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.100582 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-n5582" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.105961 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-cfp97"] Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.118975 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-9g2kt"] Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.121651 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.130005 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-9g2kt"] Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.220019 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9253fb1e-9dce-4e54-80ee-fba5e3152596-combined-ca-bundle\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.220063 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-run-ovn\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.220088 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-log\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.220150 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hclzn\" (UniqueName: \"kubernetes.io/projected/780aad6a-41ff-410c-a6fc-6be2faf38b6f-kube-api-access-hclzn\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.220233 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-run\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.220250 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-log-ovn\") pod \"ovn-controller-cfp97\" (UID: 
\"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.220275 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4m82w\" (UniqueName: \"kubernetes.io/projected/9253fb1e-9dce-4e54-80ee-fba5e3152596-kube-api-access-4m82w\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.220295 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9253fb1e-9dce-4e54-80ee-fba5e3152596-scripts\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.220318 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-run\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.220555 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-etc-ovs\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.220637 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/780aad6a-41ff-410c-a6fc-6be2faf38b6f-scripts\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.220674 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/9253fb1e-9dce-4e54-80ee-fba5e3152596-ovn-controller-tls-certs\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.220777 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-lib\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.322817 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-etc-ovs\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.322912 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/780aad6a-41ff-410c-a6fc-6be2faf38b6f-scripts\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 
22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.322954 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/9253fb1e-9dce-4e54-80ee-fba5e3152596-ovn-controller-tls-certs\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.323038 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-lib\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.323088 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9253fb1e-9dce-4e54-80ee-fba5e3152596-combined-ca-bundle\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.323152 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-run-ovn\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.323188 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-log\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.323252 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hclzn\" (UniqueName: \"kubernetes.io/projected/780aad6a-41ff-410c-a6fc-6be2faf38b6f-kube-api-access-hclzn\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.323302 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-run\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.323332 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-log-ovn\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.323379 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4m82w\" (UniqueName: \"kubernetes.io/projected/9253fb1e-9dce-4e54-80ee-fba5e3152596-kube-api-access-4m82w\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.323415 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/9253fb1e-9dce-4e54-80ee-fba5e3152596-scripts\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.323428 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-etc-ovs\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.323514 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-lib\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.323532 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-run\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.323928 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-run-ovn\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.324029 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-run\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.324065 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-log\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.324207 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-run\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.324493 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-log-ovn\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.325687 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/780aad6a-41ff-410c-a6fc-6be2faf38b6f-scripts\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.327449 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"scripts\" (UniqueName: \"kubernetes.io/configmap/9253fb1e-9dce-4e54-80ee-fba5e3152596-scripts\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.331275 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/9253fb1e-9dce-4e54-80ee-fba5e3152596-ovn-controller-tls-certs\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.340893 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9253fb1e-9dce-4e54-80ee-fba5e3152596-combined-ca-bundle\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.341971 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hclzn\" (UniqueName: \"kubernetes.io/projected/780aad6a-41ff-410c-a6fc-6be2faf38b6f-kube-api-access-hclzn\") pod \"ovn-controller-ovs-9g2kt\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.348218 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4m82w\" (UniqueName: \"kubernetes.io/projected/9253fb1e-9dce-4e54-80ee-fba5e3152596-kube-api-access-4m82w\") pod \"ovn-controller-cfp97\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.412321 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-cfp97" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.445435 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.973907 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.976650 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.979731 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.979731 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.980041 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.980047 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.980495 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-j4bfw" Jan 05 22:09:33 crc kubenswrapper[4910]: I0105 22:09:33.986774 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.137355 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.137415 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k24n8\" (UniqueName: \"kubernetes.io/projected/c6909118-b0ce-402c-8bb4-7ce665250739-kube-api-access-k24n8\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.137484 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.137523 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c6909118-b0ce-402c-8bb4-7ce665250739-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.137573 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.137609 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.137731 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c6909118-b0ce-402c-8bb4-7ce665250739-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.137914 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6909118-b0ce-402c-8bb4-7ce665250739-config\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.240440 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.240532 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c6909118-b0ce-402c-8bb4-7ce665250739-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.240613 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.240659 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.240701 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c6909118-b0ce-402c-8bb4-7ce665250739-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.240778 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6909118-b0ce-402c-8bb4-7ce665250739-config\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.241012 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.241044 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k24n8\" (UniqueName: \"kubernetes.io/projected/c6909118-b0ce-402c-8bb4-7ce665250739-kube-api-access-k24n8\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 
22:09:34.241570 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.241669 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c6909118-b0ce-402c-8bb4-7ce665250739-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.242406 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c6909118-b0ce-402c-8bb4-7ce665250739-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.242650 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6909118-b0ce-402c-8bb4-7ce665250739-config\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.246743 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.261811 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k24n8\" (UniqueName: \"kubernetes.io/projected/c6909118-b0ce-402c-8bb4-7ce665250739-kube-api-access-k24n8\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.261909 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.262995 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.264232 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:34 crc kubenswrapper[4910]: I0105 22:09:34.293316 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.455227 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.457230 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.460881 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-nldwm" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.460882 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.460881 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.461221 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.467271 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.582275 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.582325 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.582360 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.582610 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.582692 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-config\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.583136 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: 
I0105 22:09:36.583351 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.583388 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2ssq\" (UniqueName: \"kubernetes.io/projected/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-kube-api-access-w2ssq\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.685799 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.685998 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-config\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.686064 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.686112 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.686155 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2ssq\" (UniqueName: \"kubernetes.io/projected/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-kube-api-access-w2ssq\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.686249 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.686275 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.686314 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod 
\"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.686624 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.687520 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.687880 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-config\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.688270 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.705902 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.705999 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.708963 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.713133 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2ssq\" (UniqueName: \"kubernetes.io/projected/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-kube-api-access-w2ssq\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.713847 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:36 crc kubenswrapper[4910]: I0105 22:09:36.785865 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 05 22:09:37 crc kubenswrapper[4910]: E0105 22:09:37.883520 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 05 22:09:37 crc kubenswrapper[4910]: E0105 22:09:37.883738 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xtfwn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5f854695bc-9fzbj_openstack(87dec2ec-979f-41e3-b5b4-909962331a47): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 05 22:09:37 crc kubenswrapper[4910]: E0105 22:09:37.885084 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" podUID="87dec2ec-979f-41e3-b5b4-909962331a47" Jan 05 22:09:38 crc kubenswrapper[4910]: E0105 22:09:38.786295 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 05 22:09:38 
crc kubenswrapper[4910]: E0105 22:09:38.786965 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tr88j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-744ffd65bc-qqb6h_openstack(7bab51b8-290d-45bf-b81b-6fa629125e05): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 05 22:09:38 crc kubenswrapper[4910]: E0105 22:09:38.788545 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" podUID="7bab51b8-290d-45bf-b81b-6fa629125e05" Jan 05 22:09:38 crc kubenswrapper[4910]: E0105 22:09:38.880875 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 05 22:09:38 crc kubenswrapper[4910]: E0105 22:09:38.881099 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts 
--keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-62d9p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-84bb9d8bd9-r4h6d_openstack(bd9e7646-7aae-4286-8283-f166b4d60b38): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 05 22:09:38 crc kubenswrapper[4910]: E0105 22:09:38.882341 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-84bb9d8bd9-r4h6d" podUID="bd9e7646-7aae-4286-8283-f166b4d60b38" Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.025164 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.146540 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtfwn\" (UniqueName: \"kubernetes.io/projected/87dec2ec-979f-41e3-b5b4-909962331a47-kube-api-access-xtfwn\") pod \"87dec2ec-979f-41e3-b5b4-909962331a47\" (UID: \"87dec2ec-979f-41e3-b5b4-909962331a47\") " Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.146741 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87dec2ec-979f-41e3-b5b4-909962331a47-dns-svc\") pod \"87dec2ec-979f-41e3-b5b4-909962331a47\" (UID: \"87dec2ec-979f-41e3-b5b4-909962331a47\") " Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.146782 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87dec2ec-979f-41e3-b5b4-909962331a47-config\") pod \"87dec2ec-979f-41e3-b5b4-909962331a47\" (UID: \"87dec2ec-979f-41e3-b5b4-909962331a47\") " Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.147274 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87dec2ec-979f-41e3-b5b4-909962331a47-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "87dec2ec-979f-41e3-b5b4-909962331a47" (UID: "87dec2ec-979f-41e3-b5b4-909962331a47"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.147702 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87dec2ec-979f-41e3-b5b4-909962331a47-config" (OuterVolumeSpecName: "config") pod "87dec2ec-979f-41e3-b5b4-909962331a47" (UID: "87dec2ec-979f-41e3-b5b4-909962331a47"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.156318 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87dec2ec-979f-41e3-b5b4-909962331a47-kube-api-access-xtfwn" (OuterVolumeSpecName: "kube-api-access-xtfwn") pod "87dec2ec-979f-41e3-b5b4-909962331a47" (UID: "87dec2ec-979f-41e3-b5b4-909962331a47"). InnerVolumeSpecName "kube-api-access-xtfwn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.248788 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87dec2ec-979f-41e3-b5b4-909962331a47-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.248826 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtfwn\" (UniqueName: \"kubernetes.io/projected/87dec2ec-979f-41e3-b5b4-909962331a47-kube-api-access-xtfwn\") on node \"crc\" DevicePath \"\"" Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.248845 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87dec2ec-979f-41e3-b5b4-909962331a47-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.417333 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.439282 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 05 22:09:39 crc kubenswrapper[4910]: W0105 22:09:39.440909 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf9587597_0dcc_4c3a_b578_f9797dd2f9c1.slice/crio-dc39a73c9df32694a5df7333758fce61184ece2686dca01fd7a78be87892ad34 WatchSource:0}: Error finding container dc39a73c9df32694a5df7333758fce61184ece2686dca01fd7a78be87892ad34: Status 404 returned error can't find the container with id dc39a73c9df32694a5df7333758fce61184ece2686dca01fd7a78be87892ad34 Jan 05 22:09:39 crc kubenswrapper[4910]: W0105 22:09:39.443195 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9cedfb5_8c45_434f_b04d_694bf6d600b8.slice/crio-c17220f847316999c4506c2acff54beeb887dc60e3db70fa4fdc73a07da0cd76 WatchSource:0}: Error finding container c17220f847316999c4506c2acff54beeb887dc60e3db70fa4fdc73a07da0cd76: Status 404 returned error can't find the container with id c17220f847316999c4506c2acff54beeb887dc60e3db70fa4fdc73a07da0cd76 Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.448925 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.565365 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"db98b242-8ce3-4bc6-b04a-e22403612899","Type":"ContainerStarted","Data":"0cc5a53ae4132d67a7189face75f6bbefc33b4db2b7160287e2bf5d55437732e"} Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.566653 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"f9587597-0dcc-4c3a-b578-f9797dd2f9c1","Type":"ContainerStarted","Data":"dc39a73c9df32694a5df7333758fce61184ece2686dca01fd7a78be87892ad34"} Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.567947 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b9cedfb5-8c45-434f-b04d-694bf6d600b8","Type":"ContainerStarted","Data":"c17220f847316999c4506c2acff54beeb887dc60e3db70fa4fdc73a07da0cd76"} Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.570747 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" 
event={"ID":"87dec2ec-979f-41e3-b5b4-909962331a47","Type":"ContainerDied","Data":"d8fb2aecec539558ccb6453369bee54dd0e0bceda2cdc530081ca0f28cc0c2fa"} Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.570830 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-9fzbj" Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.575611 4910 generic.go:334] "Generic (PLEG): container finished" podID="935e0e47-2ab3-473a-8567-c36883d62801" containerID="615205a20bf1e814c6020d601eee8a82bbd957ab147294f544e8549a5498bcd8" exitCode=0 Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.576949 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" event={"ID":"935e0e47-2ab3-473a-8567-c36883d62801","Type":"ContainerDied","Data":"615205a20bf1e814c6020d601eee8a82bbd957ab147294f544e8549a5498bcd8"} Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.686830 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.696393 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.708328 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-9fzbj"] Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.716997 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-9fzbj"] Jan 05 22:09:39 crc kubenswrapper[4910]: I0105 22:09:39.922959 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-cfp97"] Jan 05 22:09:39 crc kubenswrapper[4910]: W0105 22:09:39.991473 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39608078_4c49_4ca6_b9d4_6cdd37d89f91.slice/crio-3c88da6b0c96eda18ce1a771f4bf3b99935aa4de65bb73c2512ac94c7c0248fc WatchSource:0}: Error finding container 3c88da6b0c96eda18ce1a771f4bf3b99935aa4de65bb73c2512ac94c7c0248fc: Status 404 returned error can't find the container with id 3c88da6b0c96eda18ce1a771f4bf3b99935aa4de65bb73c2512ac94c7c0248fc Jan 05 22:09:40 crc kubenswrapper[4910]: W0105 22:09:40.090808 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9253fb1e_9dce_4e54_80ee_fba5e3152596.slice/crio-a0c16b818c8e40bedcb653d0eed1d86bbbe46f15ce82243cb48c16eee7b1d32e WatchSource:0}: Error finding container a0c16b818c8e40bedcb653d0eed1d86bbbe46f15ce82243cb48c16eee7b1d32e: Status 404 returned error can't find the container with id a0c16b818c8e40bedcb653d0eed1d86bbbe46f15ce82243cb48c16eee7b1d32e Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.142475 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-9g2kt"] Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.213271 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.304564 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.573534 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-r4h6d" Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.591036 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c","Type":"ContainerStarted","Data":"5a454293173b849fd64d11ff249f1e8fb8cfde1bc8277f1a44e5d944fe0b0ac6"} Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.597673 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" event={"ID":"935e0e47-2ab3-473a-8567-c36883d62801","Type":"ContainerStarted","Data":"8662a7e76a65bd149e6eb96fd5ab7fb9fc2cd767165bb3e41fe97269d05c1b6a"} Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.597727 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.602998 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c6909118-b0ce-402c-8bb4-7ce665250739","Type":"ContainerStarted","Data":"705c4d3ed11f60e755643b5bfc202d1f6039141c978bf5d2c5c5e34fe09a8aab"} Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.604367 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"39608078-4c49-4ca6-b9d4-6cdd37d89f91","Type":"ContainerStarted","Data":"3c88da6b0c96eda18ce1a771f4bf3b99935aa4de65bb73c2512ac94c7c0248fc"} Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.605424 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cfp97" event={"ID":"9253fb1e-9dce-4e54-80ee-fba5e3152596","Type":"ContainerStarted","Data":"a0c16b818c8e40bedcb653d0eed1d86bbbe46f15ce82243cb48c16eee7b1d32e"} Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.606242 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2cb18efe-a80d-4657-921d-af4a18ae279d","Type":"ContainerStarted","Data":"e30bba8cb56bd6363291eb1e68f0992c5dacc1341f078f128e879d084067cc3b"} Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.607208 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bb9d8bd9-r4h6d" event={"ID":"bd9e7646-7aae-4286-8283-f166b4d60b38","Type":"ContainerDied","Data":"dd2cec7ab9e89079e41671792072c08803d8244fb8f8a391b216ad254ba4b94a"} Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.607399 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-r4h6d" Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.608171 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-9g2kt" event={"ID":"780aad6a-41ff-410c-a6fc-6be2faf38b6f","Type":"ContainerStarted","Data":"0aa430ff91116b57d25f59680c6d3eabee0fd263542fdeaf8307fb0a292bc334"} Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.626314 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" podStartSLOduration=2.831645316 podStartE2EDuration="17.626274295s" podCreationTimestamp="2026-01-05 22:09:23 +0000 UTC" firstStartedPulling="2026-01-05 22:09:24.132628485 +0000 UTC m=+1095.710126155" lastFinishedPulling="2026-01-05 22:09:38.927257464 +0000 UTC m=+1110.504755134" observedRunningTime="2026-01-05 22:09:40.619696146 +0000 UTC m=+1112.197193826" watchObservedRunningTime="2026-01-05 22:09:40.626274295 +0000 UTC m=+1112.203771985" Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.700734 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9e7646-7aae-4286-8283-f166b4d60b38-config\") pod \"bd9e7646-7aae-4286-8283-f166b4d60b38\" (UID: \"bd9e7646-7aae-4286-8283-f166b4d60b38\") " Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.700798 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62d9p\" (UniqueName: \"kubernetes.io/projected/bd9e7646-7aae-4286-8283-f166b4d60b38-kube-api-access-62d9p\") pod \"bd9e7646-7aae-4286-8283-f166b4d60b38\" (UID: \"bd9e7646-7aae-4286-8283-f166b4d60b38\") " Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.702541 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd9e7646-7aae-4286-8283-f166b4d60b38-config" (OuterVolumeSpecName: "config") pod "bd9e7646-7aae-4286-8283-f166b4d60b38" (UID: "bd9e7646-7aae-4286-8283-f166b4d60b38"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.709155 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd9e7646-7aae-4286-8283-f166b4d60b38-kube-api-access-62d9p" (OuterVolumeSpecName: "kube-api-access-62d9p") pod "bd9e7646-7aae-4286-8283-f166b4d60b38" (UID: "bd9e7646-7aae-4286-8283-f166b4d60b38"). InnerVolumeSpecName "kube-api-access-62d9p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.734175 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87dec2ec-979f-41e3-b5b4-909962331a47" path="/var/lib/kubelet/pods/87dec2ec-979f-41e3-b5b4-909962331a47/volumes" Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.803510 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9e7646-7aae-4286-8283-f166b4d60b38-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.803549 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62d9p\" (UniqueName: \"kubernetes.io/projected/bd9e7646-7aae-4286-8283-f166b4d60b38-kube-api-access-62d9p\") on node \"crc\" DevicePath \"\"" Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.972559 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-r4h6d"] Jan 05 22:09:40 crc kubenswrapper[4910]: I0105 22:09:40.979884 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-r4h6d"] Jan 05 22:09:41 crc kubenswrapper[4910]: E0105 22:09:41.142280 4910 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Jan 05 22:09:41 crc kubenswrapper[4910]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/7bab51b8-290d-45bf-b81b-6fa629125e05/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 05 22:09:41 crc kubenswrapper[4910]: > podSandboxID="722e0f67738f442021838569a17460289c33dc89e8c97df045a07b61d9baf45e" Jan 05 22:09:41 crc kubenswrapper[4910]: E0105 22:09:41.142442 4910 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 05 22:09:41 crc kubenswrapper[4910]: init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tr88j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-744ffd65bc-qqb6h_openstack(7bab51b8-290d-45bf-b81b-6fa629125e05): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/7bab51b8-290d-45bf-b81b-6fa629125e05/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 05 22:09:41 crc kubenswrapper[4910]: > logger="UnhandledError" Jan 05 22:09:41 crc kubenswrapper[4910]: E0105 22:09:41.143638 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/7bab51b8-290d-45bf-b81b-6fa629125e05/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" podUID="7bab51b8-290d-45bf-b81b-6fa629125e05" Jan 05 22:09:41 crc kubenswrapper[4910]: I0105 22:09:41.620034 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7e2a3efd-2de7-493e-af91-900b224e5313","Type":"ContainerStarted","Data":"299a441a9ea2f52977cbffea7f3f23ff9a1fa10c75e20ad3f6d05cf9c52d97b4"} Jan 05 22:09:41 crc kubenswrapper[4910]: I0105 22:09:41.623850 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b9cedfb5-8c45-434f-b04d-694bf6d600b8","Type":"ContainerStarted","Data":"2dd0985809b50b7237e41b4d234a09fc5fdb093346ee879d550b4f63215e2788"} Jan 05 22:09:42 crc kubenswrapper[4910]: I0105 22:09:42.730772 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd9e7646-7aae-4286-8283-f166b4d60b38" path="/var/lib/kubelet/pods/bd9e7646-7aae-4286-8283-f166b4d60b38/volumes" Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.628058 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.678316 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c6909118-b0ce-402c-8bb4-7ce665250739","Type":"ContainerStarted","Data":"839ae5ea9fbfa6a3aa2a1bb5b86ebdc2961253bc0eb7f9f7fe77393230b78a2e"} Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.679894 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-qqb6h"] Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.680675 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"39608078-4c49-4ca6-b9d4-6cdd37d89f91","Type":"ContainerStarted","Data":"8117e13cdc918455769d99f76275cbebcd1d57825a878291492d6665a99db931"} Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.681629 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.685912 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cfp97" event={"ID":"9253fb1e-9dce-4e54-80ee-fba5e3152596","Type":"ContainerStarted","Data":"367036c1944402d903c48f5737322433b3dce3b8986e5bf1249815eb02e56af6"} Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.686937 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-cfp97" Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.689163 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2cb18efe-a80d-4657-921d-af4a18ae279d","Type":"ContainerStarted","Data":"44a69179172486de1cfaee52c5b45f28ff6e2522c6ba8a153a50795a10335125"} Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.695793 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"f9587597-0dcc-4c3a-b578-f9797dd2f9c1","Type":"ContainerStarted","Data":"7dc2abc97367e4404626e780ecb73ce8187a7fc4d65dc7e368107933d3d9b81a"} Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.733713 4910 generic.go:334] "Generic (PLEG): container finished" podID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerID="05d94ba54d454e230e030748a2bf07bce00a32127cf9fc1e78a93b76e657c064" exitCode=0 Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.777829 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-9g2kt" event={"ID":"780aad6a-41ff-410c-a6fc-6be2faf38b6f","Type":"ContainerDied","Data":"05d94ba54d454e230e030748a2bf07bce00a32127cf9fc1e78a93b76e657c064"} Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.777872 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c","Type":"ContainerStarted","Data":"ae292dd58468a3ca3fe41cf2714cbea7c847466d8e283f8fbe34e48f2fc358f9"} Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.778058 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"db98b242-8ce3-4bc6-b04a-e22403612899","Type":"ContainerStarted","Data":"34e3a94f11898c05872cccd8fe29a521516732d502bea692be1e04ddae5e4717"} Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.778357 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.862093 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/memcached-0" podStartSLOduration=14.138839038 podStartE2EDuration="21.862066274s" podCreationTimestamp="2026-01-05 22:09:27 +0000 UTC" firstStartedPulling="2026-01-05 22:09:39.99347669 +0000 UTC m=+1111.570974360" lastFinishedPulling="2026-01-05 22:09:47.716703926 +0000 UTC m=+1119.294201596" observedRunningTime="2026-01-05 22:09:48.786479789 +0000 UTC m=+1120.363977459" watchObservedRunningTime="2026-01-05 22:09:48.862066274 +0000 UTC m=+1120.439563944" Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.873140 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-cfp97" podStartSLOduration=8.133965099 podStartE2EDuration="15.8731043s" podCreationTimestamp="2026-01-05 22:09:33 +0000 UTC" firstStartedPulling="2026-01-05 22:09:40.094356545 +0000 UTC m=+1111.671854215" lastFinishedPulling="2026-01-05 22:09:47.833495746 +0000 UTC m=+1119.410993416" observedRunningTime="2026-01-05 22:09:48.832992422 +0000 UTC m=+1120.410490092" watchObservedRunningTime="2026-01-05 22:09:48.8731043 +0000 UTC m=+1120.450601970" Jan 05 22:09:48 crc kubenswrapper[4910]: I0105 22:09:48.898897 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=12.50136612 podStartE2EDuration="20.898876883s" podCreationTimestamp="2026-01-05 22:09:28 +0000 UTC" firstStartedPulling="2026-01-05 22:09:39.421089083 +0000 UTC m=+1110.998586753" lastFinishedPulling="2026-01-05 22:09:47.818599846 +0000 UTC m=+1119.396097516" observedRunningTime="2026-01-05 22:09:48.85819235 +0000 UTC m=+1120.435690020" watchObservedRunningTime="2026-01-05 22:09:48.898876883 +0000 UTC m=+1120.476374553" Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.297313 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.412093 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tr88j\" (UniqueName: \"kubernetes.io/projected/7bab51b8-290d-45bf-b81b-6fa629125e05-kube-api-access-tr88j\") pod \"7bab51b8-290d-45bf-b81b-6fa629125e05\" (UID: \"7bab51b8-290d-45bf-b81b-6fa629125e05\") " Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.412402 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7bab51b8-290d-45bf-b81b-6fa629125e05-config\") pod \"7bab51b8-290d-45bf-b81b-6fa629125e05\" (UID: \"7bab51b8-290d-45bf-b81b-6fa629125e05\") " Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.412488 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7bab51b8-290d-45bf-b81b-6fa629125e05-dns-svc\") pod \"7bab51b8-290d-45bf-b81b-6fa629125e05\" (UID: \"7bab51b8-290d-45bf-b81b-6fa629125e05\") " Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.420144 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bab51b8-290d-45bf-b81b-6fa629125e05-kube-api-access-tr88j" (OuterVolumeSpecName: "kube-api-access-tr88j") pod "7bab51b8-290d-45bf-b81b-6fa629125e05" (UID: "7bab51b8-290d-45bf-b81b-6fa629125e05"). InnerVolumeSpecName "kube-api-access-tr88j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.434589 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bab51b8-290d-45bf-b81b-6fa629125e05-config" (OuterVolumeSpecName: "config") pod "7bab51b8-290d-45bf-b81b-6fa629125e05" (UID: "7bab51b8-290d-45bf-b81b-6fa629125e05"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.445216 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bab51b8-290d-45bf-b81b-6fa629125e05-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7bab51b8-290d-45bf-b81b-6fa629125e05" (UID: "7bab51b8-290d-45bf-b81b-6fa629125e05"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.514997 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tr88j\" (UniqueName: \"kubernetes.io/projected/7bab51b8-290d-45bf-b81b-6fa629125e05-kube-api-access-tr88j\") on node \"crc\" DevicePath \"\"" Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.515053 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7bab51b8-290d-45bf-b81b-6fa629125e05-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.515065 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7bab51b8-290d-45bf-b81b-6fa629125e05-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.790535 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" event={"ID":"7bab51b8-290d-45bf-b81b-6fa629125e05","Type":"ContainerDied","Data":"722e0f67738f442021838569a17460289c33dc89e8c97df045a07b61d9baf45e"} Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.790699 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-qqb6h" Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.856600 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-qqb6h"] Jan 05 22:09:49 crc kubenswrapper[4910]: I0105 22:09:49.863566 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-qqb6h"] Jan 05 22:09:50 crc kubenswrapper[4910]: I0105 22:09:50.734221 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bab51b8-290d-45bf-b81b-6fa629125e05" path="/var/lib/kubelet/pods/7bab51b8-290d-45bf-b81b-6fa629125e05/volumes" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.332153 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-sqdcz"] Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.334166 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.347052 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-sqdcz"]
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.347563 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.434152 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/266ffadc-b889-4089-9779-c64623269d42-ovs-rundir\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.434214 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/266ffadc-b889-4089-9779-c64623269d42-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.434381 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/266ffadc-b889-4089-9779-c64623269d42-config\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.434495 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/266ffadc-b889-4089-9779-c64623269d42-ovn-rundir\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.434585 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/266ffadc-b889-4089-9779-c64623269d42-combined-ca-bundle\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.434658 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79vl9\" (UniqueName: \"kubernetes.io/projected/266ffadc-b889-4089-9779-c64623269d42-kube-api-access-79vl9\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.466085 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7878659675-jdc5c"]
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.471020 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-jdc5c"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.477662 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7878659675-jdc5c"]
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.479631 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.536424 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/266ffadc-b889-4089-9779-c64623269d42-ovs-rundir\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.536475 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/266ffadc-b889-4089-9779-c64623269d42-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.536508 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/266ffadc-b889-4089-9779-c64623269d42-config\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.536542 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/266ffadc-b889-4089-9779-c64623269d42-ovn-rundir\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.536576 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/266ffadc-b889-4089-9779-c64623269d42-combined-ca-bundle\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.536609 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79vl9\" (UniqueName: \"kubernetes.io/projected/266ffadc-b889-4089-9779-c64623269d42-kube-api-access-79vl9\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.537152 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/266ffadc-b889-4089-9779-c64623269d42-ovn-rundir\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.537152 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/266ffadc-b889-4089-9779-c64623269d42-ovs-rundir\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.537967 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/266ffadc-b889-4089-9779-c64623269d42-config\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.545925 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/266ffadc-b889-4089-9779-c64623269d42-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.545939 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/266ffadc-b889-4089-9779-c64623269d42-combined-ca-bundle\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.554868 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79vl9\" (UniqueName: \"kubernetes.io/projected/266ffadc-b889-4089-9779-c64623269d42-kube-api-access-79vl9\") pod \"ovn-controller-metrics-sqdcz\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.616889 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7878659675-jdc5c"]
Jan 05 22:09:56 crc kubenswrapper[4910]: E0105 22:09:56.617496 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc kube-api-access-ttqrl ovsdbserver-nb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-7878659675-jdc5c" podUID="13901e99-3735-471c-bb67-0bf52d85ca8a"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.638392 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-ovsdbserver-nb\") pod \"dnsmasq-dns-7878659675-jdc5c\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " pod="openstack/dnsmasq-dns-7878659675-jdc5c"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.638452 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-dns-svc\") pod \"dnsmasq-dns-7878659675-jdc5c\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " pod="openstack/dnsmasq-dns-7878659675-jdc5c"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.638516 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-config\") pod \"dnsmasq-dns-7878659675-jdc5c\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " pod="openstack/dnsmasq-dns-7878659675-jdc5c"
Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.638550 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttqrl\" (UniqueName: \"kubernetes.io/projected/13901e99-3735-471c-bb67-0bf52d85ca8a-kube-api-access-ttqrl\") pod \"dnsmasq-dns-7878659675-jdc5c\" (UID:
\"13901e99-3735-471c-bb67-0bf52d85ca8a\") " pod="openstack/dnsmasq-dns-7878659675-jdc5c" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.664415 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-nvh2r"] Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.665934 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.668195 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.671106 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-nvh2r"] Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.671942 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-sqdcz" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.740022 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-dns-svc\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.740134 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4q2m\" (UniqueName: \"kubernetes.io/projected/9b80f22d-bc85-41c3-95b3-8714b15c1359-kube-api-access-v4q2m\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.740171 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-config\") pod \"dnsmasq-dns-7878659675-jdc5c\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " pod="openstack/dnsmasq-dns-7878659675-jdc5c" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.740207 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-config\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.740239 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttqrl\" (UniqueName: \"kubernetes.io/projected/13901e99-3735-471c-bb67-0bf52d85ca8a-kube-api-access-ttqrl\") pod \"dnsmasq-dns-7878659675-jdc5c\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " pod="openstack/dnsmasq-dns-7878659675-jdc5c" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.740267 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.740298 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.740343 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-ovsdbserver-nb\") pod \"dnsmasq-dns-7878659675-jdc5c\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " pod="openstack/dnsmasq-dns-7878659675-jdc5c" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.740369 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-dns-svc\") pod \"dnsmasq-dns-7878659675-jdc5c\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " pod="openstack/dnsmasq-dns-7878659675-jdc5c" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.741190 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-dns-svc\") pod \"dnsmasq-dns-7878659675-jdc5c\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " pod="openstack/dnsmasq-dns-7878659675-jdc5c" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.741745 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-config\") pod \"dnsmasq-dns-7878659675-jdc5c\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " pod="openstack/dnsmasq-dns-7878659675-jdc5c" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.742735 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-ovsdbserver-nb\") pod \"dnsmasq-dns-7878659675-jdc5c\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " pod="openstack/dnsmasq-dns-7878659675-jdc5c" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.766378 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttqrl\" (UniqueName: \"kubernetes.io/projected/13901e99-3735-471c-bb67-0bf52d85ca8a-kube-api-access-ttqrl\") pod \"dnsmasq-dns-7878659675-jdc5c\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " pod="openstack/dnsmasq-dns-7878659675-jdc5c" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.841558 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-dns-svc\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.841633 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4q2m\" (UniqueName: \"kubernetes.io/projected/9b80f22d-bc85-41c3-95b3-8714b15c1359-kube-api-access-v4q2m\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.841669 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-config\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: 
\"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.841706 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.841740 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.843087 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-dns-svc\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.844390 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-config\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.845064 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.845526 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.847859 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-jdc5c" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.848761 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-9g2kt" event={"ID":"780aad6a-41ff-410c-a6fc-6be2faf38b6f","Type":"ContainerStarted","Data":"1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338"} Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.860543 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-jdc5c" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.869533 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4q2m\" (UniqueName: \"kubernetes.io/projected/9b80f22d-bc85-41c3-95b3-8714b15c1359-kube-api-access-v4q2m\") pod \"dnsmasq-dns-586b989cdc-nvh2r\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.943291 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-ovsdbserver-nb\") pod \"13901e99-3735-471c-bb67-0bf52d85ca8a\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.943426 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttqrl\" (UniqueName: \"kubernetes.io/projected/13901e99-3735-471c-bb67-0bf52d85ca8a-kube-api-access-ttqrl\") pod \"13901e99-3735-471c-bb67-0bf52d85ca8a\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.943450 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-config\") pod \"13901e99-3735-471c-bb67-0bf52d85ca8a\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.943503 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-dns-svc\") pod \"13901e99-3735-471c-bb67-0bf52d85ca8a\" (UID: \"13901e99-3735-471c-bb67-0bf52d85ca8a\") " Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.944329 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "13901e99-3735-471c-bb67-0bf52d85ca8a" (UID: "13901e99-3735-471c-bb67-0bf52d85ca8a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.944742 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "13901e99-3735-471c-bb67-0bf52d85ca8a" (UID: "13901e99-3735-471c-bb67-0bf52d85ca8a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.945161 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-config" (OuterVolumeSpecName: "config") pod "13901e99-3735-471c-bb67-0bf52d85ca8a" (UID: "13901e99-3735-471c-bb67-0bf52d85ca8a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.948807 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13901e99-3735-471c-bb67-0bf52d85ca8a-kube-api-access-ttqrl" (OuterVolumeSpecName: "kube-api-access-ttqrl") pod "13901e99-3735-471c-bb67-0bf52d85ca8a" (UID: "13901e99-3735-471c-bb67-0bf52d85ca8a"). InnerVolumeSpecName "kube-api-access-ttqrl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:09:56 crc kubenswrapper[4910]: I0105 22:09:56.989977 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:09:57 crc kubenswrapper[4910]: I0105 22:09:57.045275 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 22:09:57 crc kubenswrapper[4910]: I0105 22:09:57.045612 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttqrl\" (UniqueName: \"kubernetes.io/projected/13901e99-3735-471c-bb67-0bf52d85ca8a-kube-api-access-ttqrl\") on node \"crc\" DevicePath \"\"" Jan 05 22:09:57 crc kubenswrapper[4910]: I0105 22:09:57.045627 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:09:57 crc kubenswrapper[4910]: I0105 22:09:57.045636 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/13901e99-3735-471c-bb67-0bf52d85ca8a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 22:09:57 crc kubenswrapper[4910]: I0105 22:09:57.382596 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 05 22:09:57 crc kubenswrapper[4910]: I0105 22:09:57.858385 4910 generic.go:334] "Generic (PLEG): container finished" podID="f9587597-0dcc-4c3a-b578-f9797dd2f9c1" containerID="7dc2abc97367e4404626e780ecb73ce8187a7fc4d65dc7e368107933d3d9b81a" exitCode=0 Jan 05 22:09:57 crc kubenswrapper[4910]: I0105 22:09:57.858468 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-jdc5c" Jan 05 22:09:57 crc kubenswrapper[4910]: I0105 22:09:57.858481 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"f9587597-0dcc-4c3a-b578-f9797dd2f9c1","Type":"ContainerDied","Data":"7dc2abc97367e4404626e780ecb73ce8187a7fc4d65dc7e368107933d3d9b81a"} Jan 05 22:09:57 crc kubenswrapper[4910]: I0105 22:09:57.934271 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7878659675-jdc5c"] Jan 05 22:09:57 crc kubenswrapper[4910]: I0105 22:09:57.943103 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7878659675-jdc5c"] Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.734954 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13901e99-3735-471c-bb67-0bf52d85ca8a" path="/var/lib/kubelet/pods/13901e99-3735-471c-bb67-0bf52d85ca8a/volumes" Jan 05 22:09:58 crc kubenswrapper[4910]: W0105 22:09:58.865966 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b80f22d_bc85_41c3_95b3_8714b15c1359.slice/crio-f1b16996d4855d2a80e64fff90429579a023dee0fd73098bc7bd5428c1833fc0 WatchSource:0}: Error finding container f1b16996d4855d2a80e64fff90429579a023dee0fd73098bc7bd5428c1833fc0: Status 404 returned error can't find the container with id f1b16996d4855d2a80e64fff90429579a023dee0fd73098bc7bd5428c1833fc0 Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.867683 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-nvh2r"] Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.868029 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"f9587597-0dcc-4c3a-b578-f9797dd2f9c1","Type":"ContainerStarted","Data":"e582b0a3f3996c075f0d0a3ac06e81dc222960f93f515619da44899ee0b2bce4"} Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.880247 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-9g2kt" event={"ID":"780aad6a-41ff-410c-a6fc-6be2faf38b6f","Type":"ContainerStarted","Data":"9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5"} Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.880364 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.881079 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.890484 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c","Type":"ContainerStarted","Data":"5e44050096e20cb6a25794fd8f53d149477e83b2422b09ae2b60db07b3bfa76e"} Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.909768 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c6909118-b0ce-402c-8bb4-7ce665250739","Type":"ContainerStarted","Data":"59360022044ca27d9ee5757033d8f3c5a80aee6675d87823287602295063b5ec"} Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.917519 4910 generic.go:334] "Generic (PLEG): container finished" podID="2cb18efe-a80d-4657-921d-af4a18ae279d" containerID="44a69179172486de1cfaee52c5b45f28ff6e2522c6ba8a153a50795a10335125" exitCode=0 Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.917573 
4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2cb18efe-a80d-4657-921d-af4a18ae279d","Type":"ContainerDied","Data":"44a69179172486de1cfaee52c5b45f28ff6e2522c6ba8a153a50795a10335125"} Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.935995 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=25.5750273 podStartE2EDuration="33.93597403s" podCreationTimestamp="2026-01-05 22:09:25 +0000 UTC" firstStartedPulling="2026-01-05 22:09:39.443915925 +0000 UTC m=+1111.021413585" lastFinishedPulling="2026-01-05 22:09:47.804862645 +0000 UTC m=+1119.382360315" observedRunningTime="2026-01-05 22:09:58.895099434 +0000 UTC m=+1130.472597124" watchObservedRunningTime="2026-01-05 22:09:58.93597403 +0000 UTC m=+1130.513471710" Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.943953 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-9g2kt" podStartSLOduration=18.297602732 podStartE2EDuration="25.943930372s" podCreationTimestamp="2026-01-05 22:09:33 +0000 UTC" firstStartedPulling="2026-01-05 22:09:40.139428653 +0000 UTC m=+1111.716926323" lastFinishedPulling="2026-01-05 22:09:47.785756293 +0000 UTC m=+1119.363253963" observedRunningTime="2026-01-05 22:09:58.921947032 +0000 UTC m=+1130.499444702" watchObservedRunningTime="2026-01-05 22:09:58.943930372 +0000 UTC m=+1130.521428042" Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.945649 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=5.826649322 podStartE2EDuration="23.945603263s" podCreationTimestamp="2026-01-05 22:09:35 +0000 UTC" firstStartedPulling="2026-01-05 22:09:40.385501063 +0000 UTC m=+1111.962998723" lastFinishedPulling="2026-01-05 22:09:58.504454994 +0000 UTC m=+1130.081952664" observedRunningTime="2026-01-05 22:09:58.944379153 +0000 UTC m=+1130.521876823" watchObservedRunningTime="2026-01-05 22:09:58.945603263 +0000 UTC m=+1130.523100953" Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.987651 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-sqdcz"] Jan 05 22:09:58 crc kubenswrapper[4910]: I0105 22:09:58.993729 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=8.731421164 podStartE2EDuration="26.993701524s" podCreationTimestamp="2026-01-05 22:09:32 +0000 UTC" firstStartedPulling="2026-01-05 22:09:40.221182997 +0000 UTC m=+1111.798680667" lastFinishedPulling="2026-01-05 22:09:58.483463357 +0000 UTC m=+1130.060961027" observedRunningTime="2026-01-05 22:09:58.986728715 +0000 UTC m=+1130.564226385" watchObservedRunningTime="2026-01-05 22:09:58.993701524 +0000 UTC m=+1130.571199194" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.263449 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-nvh2r"] Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.265608 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.294233 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.355934 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-ghk4m"] Jan 05 
22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.363489 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.383498 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-ghk4m"] Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.416083 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.416240 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-config\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.416308 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.416336 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.416368 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnfcg\" (UniqueName: \"kubernetes.io/projected/37ca269e-6da3-4dba-943f-6f2e957c8036-kube-api-access-hnfcg\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.520050 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.520133 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-config\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.520192 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 
22:09:59.520219 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.520245 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnfcg\" (UniqueName: \"kubernetes.io/projected/37ca269e-6da3-4dba-943f-6f2e957c8036-kube-api-access-hnfcg\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.521548 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.521791 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.525555 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.526408 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-config\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.548024 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnfcg\" (UniqueName: \"kubernetes.io/projected/37ca269e-6da3-4dba-943f-6f2e957c8036-kube-api-access-hnfcg\") pod \"dnsmasq-dns-67fdf7998c-ghk4m\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.695111 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m"
Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.926507 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-sqdcz" event={"ID":"266ffadc-b889-4089-9779-c64623269d42","Type":"ContainerStarted","Data":"0aed0283be1d2d7717625b9ca57d441f05965d2b141a4e5d7c184eead1f9c999"}
Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.927137 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-sqdcz" event={"ID":"266ffadc-b889-4089-9779-c64623269d42","Type":"ContainerStarted","Data":"0abeec70948d2691606e88cd47569b900e6929c45325e7ee02d3961b959ea6a4"}
Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.935790 4910 generic.go:334] "Generic (PLEG): container finished" podID="9b80f22d-bc85-41c3-95b3-8714b15c1359" containerID="bc25c07274c06a6469cd5b1ce580917f71e0ae9888a0c737cc910cbc12d067e6" exitCode=0
Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.935918 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" event={"ID":"9b80f22d-bc85-41c3-95b3-8714b15c1359","Type":"ContainerDied","Data":"bc25c07274c06a6469cd5b1ce580917f71e0ae9888a0c737cc910cbc12d067e6"}
Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.935968 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" event={"ID":"9b80f22d-bc85-41c3-95b3-8714b15c1359","Type":"ContainerStarted","Data":"f1b16996d4855d2a80e64fff90429579a023dee0fd73098bc7bd5428c1833fc0"}
Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.953780 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2cb18efe-a80d-4657-921d-af4a18ae279d","Type":"ContainerStarted","Data":"631d63a96f64fb0aa20db63e43afb3158c0927307ea2182cd6951a7f9852fdca"}
Jan 05 22:09:59 crc kubenswrapper[4910]: I0105 22:09:59.965231 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-sqdcz" podStartSLOduration=3.965202824 podStartE2EDuration="3.965202824s" podCreationTimestamp="2026-01-05 22:09:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:09:59.957678103 +0000 UTC m=+1131.535175773" watchObservedRunningTime="2026-01-05 22:09:59.965202824 +0000 UTC m=+1131.542700494"
Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.046759 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=28.224747773 podStartE2EDuration="36.046739883s" podCreationTimestamp="2026-01-05 22:09:24 +0000 UTC" firstStartedPulling="2026-01-05 22:09:39.99305302 +0000 UTC m=+1111.570550690" lastFinishedPulling="2026-01-05 22:09:47.81504513 +0000 UTC m=+1119.392542800" observedRunningTime="2026-01-05 22:10:00.018443189 +0000 UTC m=+1131.595940869" watchObservedRunningTime="2026-01-05 22:10:00.046739883 +0000 UTC m=+1131.624237553"
Jan 05 22:10:00 crc kubenswrapper[4910]: E0105 22:10:00.248812 4910 log.go:32] "CreateContainer in sandbox from runtime service failed" err=<
Jan 05 22:10:00 crc kubenswrapper[4910]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/9b80f22d-bc85-41c3-95b3-8714b15c1359/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory
Jan 05 22:10:00 crc kubenswrapper[4910]: > podSandboxID="f1b16996d4855d2a80e64fff90429579a023dee0fd73098bc7bd5428c1833fc0"
Jan 05 22:10:00 crc kubenswrapper[4910]: E0105 22:10:00.249008 4910 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 05 22:10:00 crc kubenswrapper[4910]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n599h5cbh7ch5d4h66fh676hdbh546h95h88h5ffh55ch7fhch57ch687hddhc7h5fdh57dh674h56fh64ch98h9bh557h55dh646h54ch54fh5c4h597q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-nb,SubPath:ovsdbserver-nb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-sb,SubPath:ovsdbserver-sb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v4q2m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-586b989cdc-nvh2r_openstack(9b80f22d-bc85-41c3-95b3-8714b15c1359): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/9b80f22d-bc85-41c3-95b3-8714b15c1359/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory
Jan 05 22:10:00 crc kubenswrapper[4910]: > logger="UnhandledError"
Jan 05 22:10:00 crc kubenswrapper[4910]: E0105 22:10:00.250768 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/9b80f22d-bc85-41c3-95b3-8714b15c1359/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" podUID="9b80f22d-bc85-41c3-95b3-8714b15c1359"
Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.259917 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-ghk4m"]
Jan 05 22:10:00 crc kubenswrapper[4910]: W0105 22:10:00.294465 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37ca269e_6da3_4dba_943f_6f2e957c8036.slice/crio-a901abb7d90cdaf2ea81dc7ddeceb92c320a1fb1dc9fb52b77d7f0aa596a3eab WatchSource:0}: Error finding container a901abb7d90cdaf2ea81dc7ddeceb92c320a1fb1dc9fb52b77d7f0aa596a3eab: Status 404 returned error can't find the container with id a901abb7d90cdaf2ea81dc7ddeceb92c320a1fb1dc9fb52b77d7f0aa596a3eab
Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.490166 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"]
Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.496683 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.502428 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files"
Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.503012 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-zzsjr"
Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.503251 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data"
Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.503924 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf"
Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.514910 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.565265 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0"
Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.565330 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7m87\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-kube-api-access-b7m87\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0"
Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.565365 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-cache\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0"
Jan 05 22:10:00 crc kubenswrapper[4910]: I0105
22:10:00.565391 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0" Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.565437 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-lock\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0" Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.667549 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0" Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.667645 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7m87\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-kube-api-access-b7m87\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0" Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.667688 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-cache\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0" Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.667720 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0" Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.667794 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-lock\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0" Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.668155 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/swift-storage-0" Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.668430 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-cache\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0" Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.668495 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-lock\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0" Jan 05 22:10:00 crc kubenswrapper[4910]: E0105 
22:10:00.668577 4910 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 05 22:10:00 crc kubenswrapper[4910]: E0105 22:10:00.668601 4910 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 05 22:10:00 crc kubenswrapper[4910]: E0105 22:10:00.668656 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift podName:4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c nodeName:}" failed. No retries permitted until 2026-01-05 22:10:01.168637353 +0000 UTC m=+1132.746135013 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift") pod "swift-storage-0" (UID: "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c") : configmap "swift-ring-files" not found Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.690216 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0" Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.690344 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7m87\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-kube-api-access-b7m87\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0" Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.787219 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.833381 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.961636 4910 generic.go:334] "Generic (PLEG): container finished" podID="37ca269e-6da3-4dba-943f-6f2e957c8036" containerID="c1cf57a3c90ac52dd9c7f167357a63e5f638af128a9fc9c6a1d621c7efd91957" exitCode=0 Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.961710 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" event={"ID":"37ca269e-6da3-4dba-943f-6f2e957c8036","Type":"ContainerDied","Data":"c1cf57a3c90ac52dd9c7f167357a63e5f638af128a9fc9c6a1d621c7efd91957"} Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.961777 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" event={"ID":"37ca269e-6da3-4dba-943f-6f2e957c8036","Type":"ContainerStarted","Data":"a901abb7d90cdaf2ea81dc7ddeceb92c320a1fb1dc9fb52b77d7f0aa596a3eab"} Jan 05 22:10:00 crc kubenswrapper[4910]: I0105 22:10:00.962366 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.025078 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.092042 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-8k5dk"] Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.093214 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.096357 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.096511 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.096739 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.113045 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-8k5dk"] Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.186689 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0" Jan 05 22:10:01 crc kubenswrapper[4910]: E0105 22:10:01.186888 4910 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 05 22:10:01 crc kubenswrapper[4910]: E0105 22:10:01.186912 4910 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 05 22:10:01 crc kubenswrapper[4910]: E0105 22:10:01.186963 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift podName:4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c nodeName:}" failed. No retries permitted until 2026-01-05 22:10:02.186948074 +0000 UTC m=+1133.764445744 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift") pod "swift-storage-0" (UID: "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c") : configmap "swift-ring-files" not found Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.288428 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-combined-ca-bundle\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.288494 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-ring-data-devices\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.288527 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-etc-swift\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.288588 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntb8w\" (UniqueName: \"kubernetes.io/projected/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-kube-api-access-ntb8w\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.288638 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-scripts\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.288887 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-dispersionconf\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.289154 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-swiftconf\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.295094 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.344923 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.391467 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-ring-data-devices\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.392053 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-etc-swift\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.392149 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntb8w\" (UniqueName: \"kubernetes.io/projected/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-kube-api-access-ntb8w\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.392214 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-scripts\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.392252 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-dispersionconf\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.392318 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-swiftconf\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.392429 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-combined-ca-bundle\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.394014 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-etc-swift\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.394826 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-scripts\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.395618 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-ring-data-devices\") pod 
\"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.398953 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-dispersionconf\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.400826 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-combined-ca-bundle\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.400929 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-swiftconf\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.414140 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntb8w\" (UniqueName: \"kubernetes.io/projected/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-kube-api-access-ntb8w\") pod \"swift-ring-rebalance-8k5dk\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") " pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.414725 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-8k5dk" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.462420 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.595820 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-dns-svc\") pod \"9b80f22d-bc85-41c3-95b3-8714b15c1359\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.595955 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-ovsdbserver-sb\") pod \"9b80f22d-bc85-41c3-95b3-8714b15c1359\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.595995 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4q2m\" (UniqueName: \"kubernetes.io/projected/9b80f22d-bc85-41c3-95b3-8714b15c1359-kube-api-access-v4q2m\") pod \"9b80f22d-bc85-41c3-95b3-8714b15c1359\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.596020 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-config\") pod \"9b80f22d-bc85-41c3-95b3-8714b15c1359\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.596075 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-ovsdbserver-nb\") pod \"9b80f22d-bc85-41c3-95b3-8714b15c1359\" (UID: \"9b80f22d-bc85-41c3-95b3-8714b15c1359\") " Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.611400 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b80f22d-bc85-41c3-95b3-8714b15c1359-kube-api-access-v4q2m" (OuterVolumeSpecName: "kube-api-access-v4q2m") pod "9b80f22d-bc85-41c3-95b3-8714b15c1359" (UID: "9b80f22d-bc85-41c3-95b3-8714b15c1359"). InnerVolumeSpecName "kube-api-access-v4q2m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.641437 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9b80f22d-bc85-41c3-95b3-8714b15c1359" (UID: "9b80f22d-bc85-41c3-95b3-8714b15c1359"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.646210 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9b80f22d-bc85-41c3-95b3-8714b15c1359" (UID: "9b80f22d-bc85-41c3-95b3-8714b15c1359"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.651726 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-config" (OuterVolumeSpecName: "config") pod "9b80f22d-bc85-41c3-95b3-8714b15c1359" (UID: "9b80f22d-bc85-41c3-95b3-8714b15c1359"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.652643 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9b80f22d-bc85-41c3-95b3-8714b15c1359" (UID: "9b80f22d-bc85-41c3-95b3-8714b15c1359"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.701639 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.702046 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4q2m\" (UniqueName: \"kubernetes.io/projected/9b80f22d-bc85-41c3-95b3-8714b15c1359-kube-api-access-v4q2m\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.702060 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.702069 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.702080 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b80f22d-bc85-41c3-95b3-8714b15c1359-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.967933 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-8k5dk"] Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.989972 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-nvh2r" event={"ID":"9b80f22d-bc85-41c3-95b3-8714b15c1359","Type":"ContainerDied","Data":"f1b16996d4855d2a80e64fff90429579a023dee0fd73098bc7bd5428c1833fc0"} Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.990046 4910 scope.go:117] "RemoveContainer" containerID="bc25c07274c06a6469cd5b1ce580917f71e0ae9888a0c737cc910cbc12d067e6" Jan 05 22:10:01 crc kubenswrapper[4910]: I0105 22:10:01.990236 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-nvh2r"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:01.999287 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" event={"ID":"37ca269e-6da3-4dba-943f-6f2e957c8036","Type":"ContainerStarted","Data":"c3a60b5e77b926f34a95e627d81c9ffa761857008a997e6e6062410af816fedf"}
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:01.999333 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.063453 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" podStartSLOduration=3.063417891 podStartE2EDuration="3.063417891s" podCreationTimestamp="2026-01-05 22:09:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:10:02.053729627 +0000 UTC m=+1133.631227297" watchObservedRunningTime="2026-01-05 22:10:02.063417891 +0000 UTC m=+1133.640915561"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.163207 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-nvh2r"]
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.195707 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-nvh2r"]
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.234970 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: E0105 22:10:02.235192 4910 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 05 22:10:02 crc kubenswrapper[4910]: E0105 22:10:02.235211 4910 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 05 22:10:02 crc kubenswrapper[4910]: E0105 22:10:02.235257 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift podName:4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c nodeName:}" failed. No retries permitted until 2026-01-05 22:10:04.235243089 +0000 UTC m=+1135.812740759 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift") pod "swift-storage-0" (UID: "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c") : configmap "swift-ring-files" not found
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.287347 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.476412 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Jan 05 22:10:02 crc kubenswrapper[4910]: E0105 22:10:02.477035 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b80f22d-bc85-41c3-95b3-8714b15c1359" containerName="init"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.477052 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b80f22d-bc85-41c3-95b3-8714b15c1359" containerName="init"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.477245 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b80f22d-bc85-41c3-95b3-8714b15c1359" containerName="init"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.478090 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.480776 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-6fsdw"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.480799 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.481024 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.481613 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.496186 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.641110 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.641300 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f817e58c-a8aa-4f0d-8486-153659100a11-scripts\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.641345 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f817e58c-a8aa-4f0d-8486-153659100a11-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.641376 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.641403 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.641572 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f817e58c-a8aa-4f0d-8486-153659100a11-config\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.641760 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljtbt\" (UniqueName: \"kubernetes.io/projected/f817e58c-a8aa-4f0d-8486-153659100a11-kube-api-access-ljtbt\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.731457 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b80f22d-bc85-41c3-95b3-8714b15c1359" path="/var/lib/kubelet/pods/9b80f22d-bc85-41c3-95b3-8714b15c1359/volumes"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.743445 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f817e58c-a8aa-4f0d-8486-153659100a11-scripts\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.743492 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f817e58c-a8aa-4f0d-8486-153659100a11-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.743513 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.743533 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.743561 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f817e58c-a8aa-4f0d-8486-153659100a11-config\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.743609 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljtbt\" (UniqueName: \"kubernetes.io/projected/f817e58c-a8aa-4f0d-8486-153659100a11-kube-api-access-ljtbt\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.743657 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.745149 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f817e58c-a8aa-4f0d-8486-153659100a11-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.745571 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f817e58c-a8aa-4f0d-8486-153659100a11-config\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.745670 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f817e58c-a8aa-4f0d-8486-153659100a11-scripts\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.749967 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.750962 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.751804 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.763683 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljtbt\" (UniqueName: \"kubernetes.io/projected/f817e58c-a8aa-4f0d-8486-153659100a11-kube-api-access-ljtbt\") pod \"ovn-northd-0\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " pod="openstack/ovn-northd-0"
Jan 05 22:10:02 crc kubenswrapper[4910]: I0105 22:10:02.801545 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 05 22:10:03 crc kubenswrapper[4910]: I0105 22:10:03.028802 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-8k5dk" event={"ID":"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2","Type":"ContainerStarted","Data":"1ab49c0c942d6350408c29b1f42bf5a68aa69dd747094ed5e549166cd4d25685"}
Jan 05 22:10:03 crc kubenswrapper[4910]: I0105 22:10:03.338208 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Jan 05 22:10:03 crc kubenswrapper[4910]: W0105 22:10:03.360156 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf817e58c_a8aa_4f0d_8486_153659100a11.slice/crio-bfd4ad6c8a0477ecc83191d5bd773c2ac268eebcf23ed43a91863855f05d336b WatchSource:0}: Error finding container bfd4ad6c8a0477ecc83191d5bd773c2ac268eebcf23ed43a91863855f05d336b: Status 404 returned error can't find the container with id bfd4ad6c8a0477ecc83191d5bd773c2ac268eebcf23ed43a91863855f05d336b
Jan 05 22:10:04 crc kubenswrapper[4910]: I0105 22:10:04.054941 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f817e58c-a8aa-4f0d-8486-153659100a11","Type":"ContainerStarted","Data":"bfd4ad6c8a0477ecc83191d5bd773c2ac268eebcf23ed43a91863855f05d336b"}
Jan 05 22:10:04 crc kubenswrapper[4910]: I0105 22:10:04.280439 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0"
Jan 05 22:10:04 crc kubenswrapper[4910]: E0105 22:10:04.280658 4910 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 05 22:10:04 crc kubenswrapper[4910]: E0105 22:10:04.280675 4910 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 05 22:10:04 crc kubenswrapper[4910]: E0105 22:10:04.280727 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift podName:4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c nodeName:}" failed. No retries permitted until 2026-01-05 22:10:08.280708322 +0000 UTC m=+1139.858205992 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift") pod "swift-storage-0" (UID: "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c") : configmap "swift-ring-files" not found
Jan 05 22:10:06 crc kubenswrapper[4910]: I0105 22:10:06.272819 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Jan 05 22:10:06 crc kubenswrapper[4910]: I0105 22:10:06.273217 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Jan 05 22:10:06 crc kubenswrapper[4910]: I0105 22:10:06.411399 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Jan 05 22:10:07 crc kubenswrapper[4910]: I0105 22:10:07.262392 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Jan 05 22:10:07 crc kubenswrapper[4910]: I0105 22:10:07.262866 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Jan 05 22:10:07 crc kubenswrapper[4910]: I0105 22:10:07.348762 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Jan 05 22:10:07 crc kubenswrapper[4910]: I0105 22:10:07.490918 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Jan 05 22:10:07 crc kubenswrapper[4910]: I0105 22:10:07.492588 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Jan 05 22:10:08 crc kubenswrapper[4910]: I0105 22:10:08.346440 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0"
Jan 05 22:10:08 crc kubenswrapper[4910]: E0105 22:10:08.347092 4910 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 05 22:10:08 crc kubenswrapper[4910]: E0105 22:10:08.347180 4910 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 05 22:10:08 crc kubenswrapper[4910]: E0105 22:10:08.347292 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift podName:4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c nodeName:}" failed. No retries permitted until 2026-01-05 22:10:16.347270613 +0000 UTC m=+1147.924768283 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift") pod "swift-storage-0" (UID: "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c") : configmap "swift-ring-files" not found
Jan 05 22:10:08 crc kubenswrapper[4910]: I0105 22:10:08.373187 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-8k5dk" event={"ID":"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2","Type":"ContainerStarted","Data":"3f94cee1613676c311b9e111ce3fc8d4a4347ded3807c06e278897a8efd59845"}
Jan 05 22:10:08 crc kubenswrapper[4910]: I0105 22:10:08.420949 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-8k5dk" podStartSLOduration=2.314460749 podStartE2EDuration="7.420930941s" podCreationTimestamp="2026-01-05 22:10:01 +0000 UTC" firstStartedPulling="2026-01-05 22:10:02.083128127 +0000 UTC m=+1133.660625797" lastFinishedPulling="2026-01-05 22:10:07.189598319 +0000 UTC m=+1138.767095989" observedRunningTime="2026-01-05 22:10:08.418855731 +0000 UTC m=+1139.996353421" watchObservedRunningTime="2026-01-05 22:10:08.420930941 +0000 UTC m=+1139.998428611"
Jan 05 22:10:09 crc kubenswrapper[4910]: I0105 22:10:09.390397 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f817e58c-a8aa-4f0d-8486-153659100a11","Type":"ContainerStarted","Data":"6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627"}
Jan 05 22:10:09 crc kubenswrapper[4910]: I0105 22:10:09.391075 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f817e58c-a8aa-4f0d-8486-153659100a11","Type":"ContainerStarted","Data":"775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf"}
Jan 05 22:10:09 crc kubenswrapper[4910]: I0105 22:10:09.391115 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Jan 05 22:10:09 crc kubenswrapper[4910]: I0105 22:10:09.419510 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.49187128 podStartE2EDuration="7.419482644s" podCreationTimestamp="2026-01-05 22:10:02 +0000 UTC" firstStartedPulling="2026-01-05 22:10:03.361424863 +0000 UTC m=+1134.938922533" lastFinishedPulling="2026-01-05 22:10:08.289036237 +0000 UTC m=+1139.866533897" observedRunningTime="2026-01-05 22:10:09.414613017 +0000 UTC m=+1140.992110687" watchObservedRunningTime="2026-01-05 22:10:09.419482644 +0000 UTC m=+1140.996980314"
Jan 05 22:10:09 crc kubenswrapper[4910]: I0105 22:10:09.698070 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m"
Jan 05 22:10:09 crc kubenswrapper[4910]: I0105 22:10:09.755352 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-jzdx6"]
Jan 05 22:10:09 crc kubenswrapper[4910]: I0105 22:10:09.755745 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" podUID="935e0e47-2ab3-473a-8567-c36883d62801" containerName="dnsmasq-dns" containerID="cri-o://8662a7e76a65bd149e6eb96fd5ab7fb9fc2cd767165bb3e41fe97269d05c1b6a" gracePeriod=10
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.276570 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-jzdx6"
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.400101 4910 generic.go:334] "Generic (PLEG): container finished" podID="935e0e47-2ab3-473a-8567-c36883d62801" containerID="8662a7e76a65bd149e6eb96fd5ab7fb9fc2cd767165bb3e41fe97269d05c1b6a" exitCode=0
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.400178 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-jzdx6"
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.400217 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" event={"ID":"935e0e47-2ab3-473a-8567-c36883d62801","Type":"ContainerDied","Data":"8662a7e76a65bd149e6eb96fd5ab7fb9fc2cd767165bb3e41fe97269d05c1b6a"}
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.400310 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-jzdx6" event={"ID":"935e0e47-2ab3-473a-8567-c36883d62801","Type":"ContainerDied","Data":"8ab074d628521b49b924a9b92e79bb716c3bfdd6ec0c7e2a3da3595545a2619a"}
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.400345 4910 scope.go:117] "RemoveContainer" containerID="8662a7e76a65bd149e6eb96fd5ab7fb9fc2cd767165bb3e41fe97269d05c1b6a"
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.401522 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/935e0e47-2ab3-473a-8567-c36883d62801-config\") pod \"935e0e47-2ab3-473a-8567-c36883d62801\" (UID: \"935e0e47-2ab3-473a-8567-c36883d62801\") "
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.401667 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fmrn\" (UniqueName: \"kubernetes.io/projected/935e0e47-2ab3-473a-8567-c36883d62801-kube-api-access-8fmrn\") pod \"935e0e47-2ab3-473a-8567-c36883d62801\" (UID: \"935e0e47-2ab3-473a-8567-c36883d62801\") "
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.403875 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/935e0e47-2ab3-473a-8567-c36883d62801-dns-svc\") pod \"935e0e47-2ab3-473a-8567-c36883d62801\" (UID: \"935e0e47-2ab3-473a-8567-c36883d62801\") "
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.412946 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/935e0e47-2ab3-473a-8567-c36883d62801-kube-api-access-8fmrn" (OuterVolumeSpecName: "kube-api-access-8fmrn") pod "935e0e47-2ab3-473a-8567-c36883d62801" (UID: "935e0e47-2ab3-473a-8567-c36883d62801"). InnerVolumeSpecName "kube-api-access-8fmrn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.425473 4910 scope.go:117] "RemoveContainer" containerID="615205a20bf1e814c6020d601eee8a82bbd957ab147294f544e8549a5498bcd8"
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.446235 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/935e0e47-2ab3-473a-8567-c36883d62801-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "935e0e47-2ab3-473a-8567-c36883d62801" (UID: "935e0e47-2ab3-473a-8567-c36883d62801"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.447786 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/935e0e47-2ab3-473a-8567-c36883d62801-config" (OuterVolumeSpecName: "config") pod "935e0e47-2ab3-473a-8567-c36883d62801" (UID: "935e0e47-2ab3-473a-8567-c36883d62801"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.496217 4910 scope.go:117] "RemoveContainer" containerID="8662a7e76a65bd149e6eb96fd5ab7fb9fc2cd767165bb3e41fe97269d05c1b6a"
Jan 05 22:10:10 crc kubenswrapper[4910]: E0105 22:10:10.496899 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8662a7e76a65bd149e6eb96fd5ab7fb9fc2cd767165bb3e41fe97269d05c1b6a\": container with ID starting with 8662a7e76a65bd149e6eb96fd5ab7fb9fc2cd767165bb3e41fe97269d05c1b6a not found: ID does not exist" containerID="8662a7e76a65bd149e6eb96fd5ab7fb9fc2cd767165bb3e41fe97269d05c1b6a"
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.496974 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8662a7e76a65bd149e6eb96fd5ab7fb9fc2cd767165bb3e41fe97269d05c1b6a"} err="failed to get container status \"8662a7e76a65bd149e6eb96fd5ab7fb9fc2cd767165bb3e41fe97269d05c1b6a\": rpc error: code = NotFound desc = could not find container \"8662a7e76a65bd149e6eb96fd5ab7fb9fc2cd767165bb3e41fe97269d05c1b6a\": container with ID starting with 8662a7e76a65bd149e6eb96fd5ab7fb9fc2cd767165bb3e41fe97269d05c1b6a not found: ID does not exist"
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.497017 4910 scope.go:117] "RemoveContainer" containerID="615205a20bf1e814c6020d601eee8a82bbd957ab147294f544e8549a5498bcd8"
Jan 05 22:10:10 crc kubenswrapper[4910]: E0105 22:10:10.498252 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"615205a20bf1e814c6020d601eee8a82bbd957ab147294f544e8549a5498bcd8\": container with ID starting with 615205a20bf1e814c6020d601eee8a82bbd957ab147294f544e8549a5498bcd8 not found: ID does not exist" containerID="615205a20bf1e814c6020d601eee8a82bbd957ab147294f544e8549a5498bcd8"
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.498314 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"615205a20bf1e814c6020d601eee8a82bbd957ab147294f544e8549a5498bcd8"} err="failed to get container status \"615205a20bf1e814c6020d601eee8a82bbd957ab147294f544e8549a5498bcd8\": rpc error: code = NotFound desc = could not find container \"615205a20bf1e814c6020d601eee8a82bbd957ab147294f544e8549a5498bcd8\": container with ID starting with 615205a20bf1e814c6020d601eee8a82bbd957ab147294f544e8549a5498bcd8 not found: ID does not exist"
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.506614 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/935e0e47-2ab3-473a-8567-c36883d62801-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.506672 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/935e0e47-2ab3-473a-8567-c36883d62801-config\") on node \"crc\" DevicePath \"\""
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.506685 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8fmrn\" (UniqueName: \"kubernetes.io/projected/935e0e47-2ab3-473a-8567-c36883d62801-kube-api-access-8fmrn\") on node \"crc\" DevicePath \"\""
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.743304 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-jzdx6"]
Jan 05 22:10:10 crc kubenswrapper[4910]: I0105 22:10:10.752307 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-jzdx6"]
Jan 05 22:10:12 crc kubenswrapper[4910]: I0105 22:10:12.732745 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="935e0e47-2ab3-473a-8567-c36883d62801" path="/var/lib/kubelet/pods/935e0e47-2ab3-473a-8567-c36883d62801/volumes"
Jan 05 22:10:13 crc kubenswrapper[4910]: I0105 22:10:13.424392 4910 generic.go:334] "Generic (PLEG): container finished" podID="7e2a3efd-2de7-493e-af91-900b224e5313" containerID="299a441a9ea2f52977cbffea7f3f23ff9a1fa10c75e20ad3f6d05cf9c52d97b4" exitCode=0
Jan 05 22:10:13 crc kubenswrapper[4910]: I0105 22:10:13.424496 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7e2a3efd-2de7-493e-af91-900b224e5313","Type":"ContainerDied","Data":"299a441a9ea2f52977cbffea7f3f23ff9a1fa10c75e20ad3f6d05cf9c52d97b4"}
Jan 05 22:10:13 crc kubenswrapper[4910]: I0105 22:10:13.425966 4910 generic.go:334] "Generic (PLEG): container finished" podID="b9cedfb5-8c45-434f-b04d-694bf6d600b8" containerID="2dd0985809b50b7237e41b4d234a09fc5fdb093346ee879d550b4f63215e2788" exitCode=0
Jan 05 22:10:13 crc kubenswrapper[4910]: I0105 22:10:13.426022 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b9cedfb5-8c45-434f-b04d-694bf6d600b8","Type":"ContainerDied","Data":"2dd0985809b50b7237e41b4d234a09fc5fdb093346ee879d550b4f63215e2788"}
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.437390 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b9cedfb5-8c45-434f-b04d-694bf6d600b8","Type":"ContainerStarted","Data":"2c95bc32934ba46ce9701d8eb4e4fdb43de1b82593499f287b4f2c6458380007"}
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.439256 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.442266 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7e2a3efd-2de7-493e-af91-900b224e5313","Type":"ContainerStarted","Data":"642125ac821bb754a0c42680f8f99f5a13b1a90ac3a61d0a934715684f4248eb"}
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.442868 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.466542 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=51.466518972 podStartE2EDuration="51.466518972s" podCreationTimestamp="2026-01-05 22:09:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:10:14.461247915 +0000 UTC m=+1146.038745585" watchObservedRunningTime="2026-01-05 22:10:14.466518972 +0000 UTC m=+1146.044016642"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.502404 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=40.648704083 podStartE2EDuration="51.502330956s" podCreationTimestamp="2026-01-05 22:09:23 +0000 UTC" firstStartedPulling="2026-01-05 22:09:28.073825856 +0000 UTC m=+1099.651323526" lastFinishedPulling="2026-01-05 22:09:38.927452729 +0000 UTC m=+1110.504950399" observedRunningTime="2026-01-05 22:10:14.491937316 +0000 UTC m=+1146.069434996" watchObservedRunningTime="2026-01-05 22:10:14.502330956 +0000 UTC m=+1146.079828626"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.667251 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-xl6z4"]
Jan 05 22:10:14 crc kubenswrapper[4910]: E0105 22:10:14.668354 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="935e0e47-2ab3-473a-8567-c36883d62801" containerName="dnsmasq-dns"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.668452 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="935e0e47-2ab3-473a-8567-c36883d62801" containerName="dnsmasq-dns"
Jan 05 22:10:14 crc kubenswrapper[4910]: E0105 22:10:14.668560 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="935e0e47-2ab3-473a-8567-c36883d62801" containerName="init"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.668624 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="935e0e47-2ab3-473a-8567-c36883d62801" containerName="init"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.668854 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="935e0e47-2ab3-473a-8567-c36883d62801" containerName="dnsmasq-dns"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.669486 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-xl6z4"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.678810 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-xl6z4"]
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.681431 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.683040 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6dad2940-a273-4f20-90d6-92278f45e92e-operator-scripts\") pod \"root-account-create-update-xl6z4\" (UID: \"6dad2940-a273-4f20-90d6-92278f45e92e\") " pod="openstack/root-account-create-update-xl6z4"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.683184 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvn2r\" (UniqueName: \"kubernetes.io/projected/6dad2940-a273-4f20-90d6-92278f45e92e-kube-api-access-zvn2r\") pod \"root-account-create-update-xl6z4\" (UID: \"6dad2940-a273-4f20-90d6-92278f45e92e\") " pod="openstack/root-account-create-update-xl6z4"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.785113 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6dad2940-a273-4f20-90d6-92278f45e92e-operator-scripts\") pod \"root-account-create-update-xl6z4\" (UID: \"6dad2940-a273-4f20-90d6-92278f45e92e\") " pod="openstack/root-account-create-update-xl6z4"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.785365 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvn2r\" (UniqueName: \"kubernetes.io/projected/6dad2940-a273-4f20-90d6-92278f45e92e-kube-api-access-zvn2r\") pod \"root-account-create-update-xl6z4\" (UID: \"6dad2940-a273-4f20-90d6-92278f45e92e\") " pod="openstack/root-account-create-update-xl6z4"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.787079 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6dad2940-a273-4f20-90d6-92278f45e92e-operator-scripts\") pod \"root-account-create-update-xl6z4\" (UID: \"6dad2940-a273-4f20-90d6-92278f45e92e\") " pod="openstack/root-account-create-update-xl6z4"
Jan 05 22:10:14 crc kubenswrapper[4910]: I0105 22:10:14.806008 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvn2r\" (UniqueName: \"kubernetes.io/projected/6dad2940-a273-4f20-90d6-92278f45e92e-kube-api-access-zvn2r\") pod \"root-account-create-update-xl6z4\" (UID: \"6dad2940-a273-4f20-90d6-92278f45e92e\") " pod="openstack/root-account-create-update-xl6z4"
Jan 05 22:10:15 crc kubenswrapper[4910]: I0105 22:10:15.031416 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-xl6z4"
Jan 05 22:10:15 crc kubenswrapper[4910]: I0105 22:10:15.452589 4910 generic.go:334] "Generic (PLEG): container finished" podID="f72b3b94-d06e-444e-bfdf-d9fbb4d46db2" containerID="3f94cee1613676c311b9e111ce3fc8d4a4347ded3807c06e278897a8efd59845" exitCode=0
Jan 05 22:10:15 crc kubenswrapper[4910]: I0105 22:10:15.452686 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-8k5dk" event={"ID":"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2","Type":"ContainerDied","Data":"3f94cee1613676c311b9e111ce3fc8d4a4347ded3807c06e278897a8efd59845"}
Jan 05 22:10:15 crc kubenswrapper[4910]: I0105 22:10:15.552877 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-xl6z4"]
Jan 05 22:10:15 crc kubenswrapper[4910]: W0105 22:10:15.557304 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6dad2940_a273_4f20_90d6_92278f45e92e.slice/crio-389dfae3bc984652e49bea888326a6619b7a3288e94ff3d91f60dd7bab756a1d WatchSource:0}: Error finding container 389dfae3bc984652e49bea888326a6619b7a3288e94ff3d91f60dd7bab756a1d: Status 404 returned error can't find the container with id 389dfae3bc984652e49bea888326a6619b7a3288e94ff3d91f60dd7bab756a1d
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.416417 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0"
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.425711 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift\") pod \"swift-storage-0\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " pod="openstack/swift-storage-0"
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.468155 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-xl6z4" event={"ID":"6dad2940-a273-4f20-90d6-92278f45e92e","Type":"ContainerStarted","Data":"389dfae3bc984652e49bea888326a6619b7a3288e94ff3d91f60dd7bab756a1d"}
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.717730 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.881771 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-8k5dk"
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.925676 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-etc-swift\") pod \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") "
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.925714 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-swiftconf\") pod \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") "
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.925784 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-combined-ca-bundle\") pod \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") "
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.925816 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-scripts\") pod \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") "
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.925857 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-ring-data-devices\") pod \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") "
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.925908 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-dispersionconf\") pod \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") "
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.925944 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntb8w\" (UniqueName: \"kubernetes.io/projected/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-kube-api-access-ntb8w\") pod \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\" (UID: \"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2\") "
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.927615 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2" (UID: "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.927662 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2" (UID: "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.935387 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-kube-api-access-ntb8w" (OuterVolumeSpecName: "kube-api-access-ntb8w") pod "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2" (UID: "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2"). InnerVolumeSpecName "kube-api-access-ntb8w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.952890 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2" (UID: "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.955553 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2" (UID: "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:10:16 crc kubenswrapper[4910]: I0105 22:10:16.990777 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2" (UID: "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.004234 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-9c2dv"]
Jan 05 22:10:17 crc kubenswrapper[4910]: E0105 22:10:17.004719 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f72b3b94-d06e-444e-bfdf-d9fbb4d46db2" containerName="swift-ring-rebalance"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.004738 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f72b3b94-d06e-444e-bfdf-d9fbb4d46db2" containerName="swift-ring-rebalance"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.004966 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f72b3b94-d06e-444e-bfdf-d9fbb4d46db2" containerName="swift-ring-rebalance"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.005784 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-9c2dv"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.007844 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-scripts" (OuterVolumeSpecName: "scripts") pod "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2" (UID: "f72b3b94-d06e-444e-bfdf-d9fbb4d46db2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.015246 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-9c2dv"]
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.026986 4910 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-swiftconf\") on node \"crc\" DevicePath \"\""
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.027175 4910 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.027249 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.027309 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-scripts\") on node \"crc\" DevicePath \"\""
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.027364 4910 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-ring-data-devices\") on node \"crc\" DevicePath \"\""
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.027428 4910 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-dispersionconf\") on node \"crc\" DevicePath \"\""
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.027483 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntb8w\" (UniqueName: \"kubernetes.io/projected/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2-kube-api-access-ntb8w\") on node \"crc\" DevicePath \"\""
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.122858 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5b71-account-create-update-5bc47"]
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.124238 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b71-account-create-update-5bc47"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.130179 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.130458 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjwqw\" (UniqueName: \"kubernetes.io/projected/910bd239-dc3a-47a1-9a25-5e21046f9725-kube-api-access-jjwqw\") pod \"keystone-db-create-9c2dv\" (UID: \"910bd239-dc3a-47a1-9a25-5e21046f9725\") " pod="openstack/keystone-db-create-9c2dv"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.130519 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/910bd239-dc3a-47a1-9a25-5e21046f9725-operator-scripts\") pod \"keystone-db-create-9c2dv\" (UID: \"910bd239-dc3a-47a1-9a25-5e21046f9725\") " pod="openstack/keystone-db-create-9c2dv"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.160595 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5b71-account-create-update-5bc47"]
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.231915 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjwqw\" (UniqueName: \"kubernetes.io/projected/910bd239-dc3a-47a1-9a25-5e21046f9725-kube-api-access-jjwqw\") pod \"keystone-db-create-9c2dv\" (UID: \"910bd239-dc3a-47a1-9a25-5e21046f9725\") " pod="openstack/keystone-db-create-9c2dv"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.232230 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/910bd239-dc3a-47a1-9a25-5e21046f9725-operator-scripts\") pod \"keystone-db-create-9c2dv\" (UID: \"910bd239-dc3a-47a1-9a25-5e21046f9725\") " pod="openstack/keystone-db-create-9c2dv"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.232262 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xssrs\" (UniqueName: \"kubernetes.io/projected/ea2fcf34-e416-43a2-a488-a9e952d19b81-kube-api-access-xssrs\") pod \"keystone-5b71-account-create-update-5bc47\" (UID: \"ea2fcf34-e416-43a2-a488-a9e952d19b81\") " pod="openstack/keystone-5b71-account-create-update-5bc47"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.232322 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ea2fcf34-e416-43a2-a488-a9e952d19b81-operator-scripts\") pod \"keystone-5b71-account-create-update-5bc47\" (UID: \"ea2fcf34-e416-43a2-a488-a9e952d19b81\") " pod="openstack/keystone-5b71-account-create-update-5bc47"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.232951 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/910bd239-dc3a-47a1-9a25-5e21046f9725-operator-scripts\") pod \"keystone-db-create-9c2dv\" (UID: \"910bd239-dc3a-47a1-9a25-5e21046f9725\") " pod="openstack/keystone-db-create-9c2dv"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.252763 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjwqw\" (UniqueName: \"kubernetes.io/projected/910bd239-dc3a-47a1-9a25-5e21046f9725-kube-api-access-jjwqw\") pod \"keystone-db-create-9c2dv\" (UID: \"910bd239-dc3a-47a1-9a25-5e21046f9725\") " pod="openstack/keystone-db-create-9c2dv"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.294298 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-ckwvh"]
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.295734 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ckwvh"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.302275 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-ckwvh"]
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.327041 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-9c2dv"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.333325 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ea2fcf34-e416-43a2-a488-a9e952d19b81-operator-scripts\") pod \"keystone-5b71-account-create-update-5bc47\" (UID: \"ea2fcf34-e416-43a2-a488-a9e952d19b81\") " pod="openstack/keystone-5b71-account-create-update-5bc47"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.333432 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xssrs\" (UniqueName: \"kubernetes.io/projected/ea2fcf34-e416-43a2-a488-a9e952d19b81-kube-api-access-xssrs\") pod \"keystone-5b71-account-create-update-5bc47\" (UID: \"ea2fcf34-e416-43a2-a488-a9e952d19b81\") " pod="openstack/keystone-5b71-account-create-update-5bc47"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.334168 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ea2fcf34-e416-43a2-a488-a9e952d19b81-operator-scripts\") pod \"keystone-5b71-account-create-update-5bc47\" (UID: \"ea2fcf34-e416-43a2-a488-a9e952d19b81\") " pod="openstack/keystone-5b71-account-create-update-5bc47"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.349630 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xssrs\" (UniqueName: \"kubernetes.io/projected/ea2fcf34-e416-43a2-a488-a9e952d19b81-kube-api-access-xssrs\") pod \"keystone-5b71-account-create-update-5bc47\" (UID: \"ea2fcf34-e416-43a2-a488-a9e952d19b81\") " pod="openstack/keystone-5b71-account-create-update-5bc47"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.396466 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-8d3f-account-create-update-8rkt2"]
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.397513 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-8d3f-account-create-update-8rkt2"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.399735 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.434860 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxgk4\" (UniqueName: \"kubernetes.io/projected/0284aa1c-869a-43b1-9984-488eaed6ba0b-kube-api-access-xxgk4\") pod \"placement-db-create-ckwvh\" (UID: \"0284aa1c-869a-43b1-9984-488eaed6ba0b\") " pod="openstack/placement-db-create-ckwvh"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.434912 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-8d3f-account-create-update-8rkt2"]
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.435270 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0284aa1c-869a-43b1-9984-488eaed6ba0b-operator-scripts\") pod \"placement-db-create-ckwvh\" (UID: \"0284aa1c-869a-43b1-9984-488eaed6ba0b\") " pod="openstack/placement-db-create-ckwvh"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.448773 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b71-account-create-update-5bc47"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.493911 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.498168 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-8k5dk" event={"ID":"f72b3b94-d06e-444e-bfdf-d9fbb4d46db2","Type":"ContainerDied","Data":"1ab49c0c942d6350408c29b1f42bf5a68aa69dd747094ed5e549166cd4d25685"}
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.498705 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ab49c0c942d6350408c29b1f42bf5a68aa69dd747094ed5e549166cd4d25685"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.498671 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-8k5dk"
Jan 05 22:10:17 crc kubenswrapper[4910]: W0105 22:10:17.514901 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4aa2e70b_9eb7_4ea1_9fdb_2687a340ba9c.slice/crio-65e5934656c083e3476166b6be1811539b0e5d926d9b6397c4b98cd4f297f842 WatchSource:0}: Error finding container 65e5934656c083e3476166b6be1811539b0e5d926d9b6397c4b98cd4f297f842: Status 404 returned error can't find the container with id 65e5934656c083e3476166b6be1811539b0e5d926d9b6397c4b98cd4f297f842
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.536876 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h9x2\" (UniqueName: \"kubernetes.io/projected/46c3eb83-1eeb-4c44-b474-73e50c5afd6e-kube-api-access-8h9x2\") pod \"placement-8d3f-account-create-update-8rkt2\" (UID: \"46c3eb83-1eeb-4c44-b474-73e50c5afd6e\") " pod="openstack/placement-8d3f-account-create-update-8rkt2"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.536944 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0284aa1c-869a-43b1-9984-488eaed6ba0b-operator-scripts\") pod \"placement-db-create-ckwvh\" (UID: \"0284aa1c-869a-43b1-9984-488eaed6ba0b\") " pod="openstack/placement-db-create-ckwvh"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.537045 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxgk4\" (UniqueName: \"kubernetes.io/projected/0284aa1c-869a-43b1-9984-488eaed6ba0b-kube-api-access-xxgk4\") pod \"placement-db-create-ckwvh\" (UID: \"0284aa1c-869a-43b1-9984-488eaed6ba0b\") " pod="openstack/placement-db-create-ckwvh"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.537101 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46c3eb83-1eeb-4c44-b474-73e50c5afd6e-operator-scripts\") pod \"placement-8d3f-account-create-update-8rkt2\" (UID: \"46c3eb83-1eeb-4c44-b474-73e50c5afd6e\") " pod="openstack/placement-8d3f-account-create-update-8rkt2"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.538939 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0284aa1c-869a-43b1-9984-488eaed6ba0b-operator-scripts\") pod \"placement-db-create-ckwvh\" (UID: \"0284aa1c-869a-43b1-9984-488eaed6ba0b\") " pod="openstack/placement-db-create-ckwvh"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.574400 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxgk4\" (UniqueName: \"kubernetes.io/projected/0284aa1c-869a-43b1-9984-488eaed6ba0b-kube-api-access-xxgk4\") pod \"placement-db-create-ckwvh\" (UID: \"0284aa1c-869a-43b1-9984-488eaed6ba0b\") " pod="openstack/placement-db-create-ckwvh"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.643166 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ckwvh"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.644245 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46c3eb83-1eeb-4c44-b474-73e50c5afd6e-operator-scripts\") pod \"placement-8d3f-account-create-update-8rkt2\" (UID: \"46c3eb83-1eeb-4c44-b474-73e50c5afd6e\") " pod="openstack/placement-8d3f-account-create-update-8rkt2"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.644304 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h9x2\" (UniqueName: \"kubernetes.io/projected/46c3eb83-1eeb-4c44-b474-73e50c5afd6e-kube-api-access-8h9x2\") pod \"placement-8d3f-account-create-update-8rkt2\" (UID: \"46c3eb83-1eeb-4c44-b474-73e50c5afd6e\") " pod="openstack/placement-8d3f-account-create-update-8rkt2"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.646363 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46c3eb83-1eeb-4c44-b474-73e50c5afd6e-operator-scripts\") pod \"placement-8d3f-account-create-update-8rkt2\" (UID: \"46c3eb83-1eeb-4c44-b474-73e50c5afd6e\") " pod="openstack/placement-8d3f-account-create-update-8rkt2"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.655819 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-2c36-account-create-update-x4kcc"]
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.657221 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-2c36-account-create-update-x4kcc"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.664302 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.672237 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h9x2\" (UniqueName: \"kubernetes.io/projected/46c3eb83-1eeb-4c44-b474-73e50c5afd6e-kube-api-access-8h9x2\") pod \"placement-8d3f-account-create-update-8rkt2\" (UID: \"46c3eb83-1eeb-4c44-b474-73e50c5afd6e\") " pod="openstack/placement-8d3f-account-create-update-8rkt2"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.683621 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-x9vtx"]
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.684721 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-x9vtx"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.701928 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-2c36-account-create-update-x4kcc"]
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.740316 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-x9vtx"]
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.791262 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-8d3f-account-create-update-8rkt2"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.847967 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9a1e15-a5b4-46bd-89d0-92c58b63c416-operator-scripts\") pod \"glance-db-create-x9vtx\" (UID: \"6d9a1e15-a5b4-46bd-89d0-92c58b63c416\") " pod="openstack/glance-db-create-x9vtx"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.848036 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84wrj\" (UniqueName: \"kubernetes.io/projected/5129c20f-7129-4dde-b59e-3d2d291c87c1-kube-api-access-84wrj\") pod \"glance-2c36-account-create-update-x4kcc\" (UID: \"5129c20f-7129-4dde-b59e-3d2d291c87c1\") " pod="openstack/glance-2c36-account-create-update-x4kcc"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.848064 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5hgv\" (UniqueName: \"kubernetes.io/projected/6d9a1e15-a5b4-46bd-89d0-92c58b63c416-kube-api-access-t5hgv\") pod \"glance-db-create-x9vtx\" (UID: \"6d9a1e15-a5b4-46bd-89d0-92c58b63c416\") " pod="openstack/glance-db-create-x9vtx"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.848087 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5129c20f-7129-4dde-b59e-3d2d291c87c1-operator-scripts\") pod \"glance-2c36-account-create-update-x4kcc\" (UID: \"5129c20f-7129-4dde-b59e-3d2d291c87c1\") " pod="openstack/glance-2c36-account-create-update-x4kcc"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.933066 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-9c2dv"]
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.949113 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9a1e15-a5b4-46bd-89d0-92c58b63c416-operator-scripts\") pod \"glance-db-create-x9vtx\" (UID: \"6d9a1e15-a5b4-46bd-89d0-92c58b63c416\") " pod="openstack/glance-db-create-x9vtx"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.953665 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84wrj\" (UniqueName: \"kubernetes.io/projected/5129c20f-7129-4dde-b59e-3d2d291c87c1-kube-api-access-84wrj\") pod \"glance-2c36-account-create-update-x4kcc\" (UID: \"5129c20f-7129-4dde-b59e-3d2d291c87c1\") " pod="openstack/glance-2c36-account-create-update-x4kcc"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.953771 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5hgv\" (UniqueName: \"kubernetes.io/projected/6d9a1e15-a5b4-46bd-89d0-92c58b63c416-kube-api-access-t5hgv\") pod \"glance-db-create-x9vtx\" (UID: \"6d9a1e15-a5b4-46bd-89d0-92c58b63c416\") " pod="openstack/glance-db-create-x9vtx"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.953823 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5129c20f-7129-4dde-b59e-3d2d291c87c1-operator-scripts\") pod \"glance-2c36-account-create-update-x4kcc\" (UID: \"5129c20f-7129-4dde-b59e-3d2d291c87c1\") " pod="openstack/glance-2c36-account-create-update-x4kcc"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.954923 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5129c20f-7129-4dde-b59e-3d2d291c87c1-operator-scripts\") pod \"glance-2c36-account-create-update-x4kcc\" (UID: \"5129c20f-7129-4dde-b59e-3d2d291c87c1\") " pod="openstack/glance-2c36-account-create-update-x4kcc"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.955142 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9a1e15-a5b4-46bd-89d0-92c58b63c416-operator-scripts\") pod \"glance-db-create-x9vtx\" (UID: \"6d9a1e15-a5b4-46bd-89d0-92c58b63c416\") " pod="openstack/glance-db-create-x9vtx"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.976747 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84wrj\" (UniqueName: \"kubernetes.io/projected/5129c20f-7129-4dde-b59e-3d2d291c87c1-kube-api-access-84wrj\") pod \"glance-2c36-account-create-update-x4kcc\" (UID: \"5129c20f-7129-4dde-b59e-3d2d291c87c1\") " pod="openstack/glance-2c36-account-create-update-x4kcc"
Jan 05 22:10:17 crc kubenswrapper[4910]: I0105 22:10:17.987260 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5hgv\" (UniqueName: \"kubernetes.io/projected/6d9a1e15-a5b4-46bd-89d0-92c58b63c416-kube-api-access-t5hgv\") pod \"glance-db-create-x9vtx\" (UID: \"6d9a1e15-a5b4-46bd-89d0-92c58b63c416\") " pod="openstack/glance-db-create-x9vtx"
Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.073625 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-2c36-account-create-update-x4kcc"
Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.087646 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-x9vtx"
Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.123121 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5b71-account-create-update-5bc47"]
Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.213757 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-ckwvh"]
Jan 05 22:10:18 crc kubenswrapper[4910]: W0105 22:10:18.241637 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0284aa1c_869a_43b1_9984_488eaed6ba0b.slice/crio-411d843e3e4478758a3efc9c74fbd32918b56c14b6b02c260fa84ebb89b093b1 WatchSource:0}: Error finding container 411d843e3e4478758a3efc9c74fbd32918b56c14b6b02c260fa84ebb89b093b1: Status 404 returned error can't find the container with id 411d843e3e4478758a3efc9c74fbd32918b56c14b6b02c260fa84ebb89b093b1
Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.326288 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-8d3f-account-create-update-8rkt2"]
Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.469053 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-cfp97" podUID="9253fb1e-9dce-4e54-80ee-fba5e3152596" containerName="ovn-controller" probeResult="failure" output=<
Jan 05 22:10:18 crc kubenswrapper[4910]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Jan 05 22:10:18 crc kubenswrapper[4910]: >
Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.507203 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8d3f-account-create-update-8rkt2" event={"ID":"46c3eb83-1eeb-4c44-b474-73e50c5afd6e","Type":"ContainerStarted","Data":"209dfa34df38a4d3156ed7342b8be52ace2d9dc012a13b7fe2943a10ddc1aeb7"}
Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.508456 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-9c2dv" event={"ID":"910bd239-dc3a-47a1-9a25-5e21046f9725","Type":"ContainerStarted","Data":"1f73c1d8d2fa0e648b4c56ae4c50cff94f88b4a2a3829bb9ebdda57b47083e6c"}
Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.508511 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-9c2dv" event={"ID":"910bd239-dc3a-47a1-9a25-5e21046f9725","Type":"ContainerStarted","Data":"f7898c66ed849b43946cdda47f8cea1b9b69bb53f5a4e9fad076f375d7c65933"}
Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.509720 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-xl6z4" event={"ID":"6dad2940-a273-4f20-90d6-92278f45e92e","Type":"ContainerStarted","Data":"c3d8f1f1aad1dd8d9445060302319a5bd2edb7b74f4c04887e27e4076c5c28f8"}
Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.511674 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ckwvh" event={"ID":"0284aa1c-869a-43b1-9984-488eaed6ba0b","Type":"ContainerStarted","Data":"411d843e3e4478758a3efc9c74fbd32918b56c14b6b02c260fa84ebb89b093b1"}
Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.512721 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5b71-account-create-update-5bc47" event={"ID":"ea2fcf34-e416-43a2-a488-a9e952d19b81","Type":"ContainerStarted","Data":"29a8bfd2df3b4c9d31f40a265bac78979d6eea42d86fa09c08f78a180b42f497"}
Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.513946 4910 kubelet.go:2453]
"SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"65e5934656c083e3476166b6be1811539b0e5d926d9b6397c4b98cd4f297f842"} Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.530609 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-xl6z4" podStartSLOduration=4.530583031 podStartE2EDuration="4.530583031s" podCreationTimestamp="2026-01-05 22:10:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:10:18.522728021 +0000 UTC m=+1150.100225701" watchObservedRunningTime="2026-01-05 22:10:18.530583031 +0000 UTC m=+1150.108080701" Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.556557 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-2c36-account-create-update-x4kcc"] Jan 05 22:10:18 crc kubenswrapper[4910]: I0105 22:10:18.606571 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-x9vtx"] Jan 05 22:10:18 crc kubenswrapper[4910]: W0105 22:10:18.628060 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d9a1e15_a5b4_46bd_89d0_92c58b63c416.slice/crio-7e2b54417fbf80f319ca721eb5496d4f03d93a913a61da4c8ac1e7ee55b073b5 WatchSource:0}: Error finding container 7e2b54417fbf80f319ca721eb5496d4f03d93a913a61da4c8ac1e7ee55b073b5: Status 404 returned error can't find the container with id 7e2b54417fbf80f319ca721eb5496d4f03d93a913a61da4c8ac1e7ee55b073b5 Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.524459 4910 generic.go:334] "Generic (PLEG): container finished" podID="0284aa1c-869a-43b1-9984-488eaed6ba0b" containerID="a5b95b626e7d918635b41ca0b7a9ee9cb24d913e67ab2531f8e4eae329e3f286" exitCode=0 Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.524688 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ckwvh" event={"ID":"0284aa1c-869a-43b1-9984-488eaed6ba0b","Type":"ContainerDied","Data":"a5b95b626e7d918635b41ca0b7a9ee9cb24d913e67ab2531f8e4eae329e3f286"} Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.536924 4910 generic.go:334] "Generic (PLEG): container finished" podID="ea2fcf34-e416-43a2-a488-a9e952d19b81" containerID="5c5f0586368b8c9a50edd5e1d2baab3ab73b86e4b18876168fd793ac4c0ba552" exitCode=0 Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.537004 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5b71-account-create-update-5bc47" event={"ID":"ea2fcf34-e416-43a2-a488-a9e952d19b81","Type":"ContainerDied","Data":"5c5f0586368b8c9a50edd5e1d2baab3ab73b86e4b18876168fd793ac4c0ba552"} Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.540849 4910 generic.go:334] "Generic (PLEG): container finished" podID="6d9a1e15-a5b4-46bd-89d0-92c58b63c416" containerID="1a0912f44fb9243b157508f54544929bbefbd015fb5658e4e03f1dad4ed2596a" exitCode=0 Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.540910 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-x9vtx" event={"ID":"6d9a1e15-a5b4-46bd-89d0-92c58b63c416","Type":"ContainerDied","Data":"1a0912f44fb9243b157508f54544929bbefbd015fb5658e4e03f1dad4ed2596a"} Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.540933 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-x9vtx" 
event={"ID":"6d9a1e15-a5b4-46bd-89d0-92c58b63c416","Type":"ContainerStarted","Data":"7e2b54417fbf80f319ca721eb5496d4f03d93a913a61da4c8ac1e7ee55b073b5"} Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.542794 4910 generic.go:334] "Generic (PLEG): container finished" podID="46c3eb83-1eeb-4c44-b474-73e50c5afd6e" containerID="90ac5d36dc8692f0d0c91b15160638e2e13a408a70e90435dcbe90ab33a57fe9" exitCode=0 Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.542839 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8d3f-account-create-update-8rkt2" event={"ID":"46c3eb83-1eeb-4c44-b474-73e50c5afd6e","Type":"ContainerDied","Data":"90ac5d36dc8692f0d0c91b15160638e2e13a408a70e90435dcbe90ab33a57fe9"} Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.558782 4910 generic.go:334] "Generic (PLEG): container finished" podID="910bd239-dc3a-47a1-9a25-5e21046f9725" containerID="1f73c1d8d2fa0e648b4c56ae4c50cff94f88b4a2a3829bb9ebdda57b47083e6c" exitCode=0 Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.558968 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-9c2dv" event={"ID":"910bd239-dc3a-47a1-9a25-5e21046f9725","Type":"ContainerDied","Data":"1f73c1d8d2fa0e648b4c56ae4c50cff94f88b4a2a3829bb9ebdda57b47083e6c"} Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.561498 4910 generic.go:334] "Generic (PLEG): container finished" podID="6dad2940-a273-4f20-90d6-92278f45e92e" containerID="c3d8f1f1aad1dd8d9445060302319a5bd2edb7b74f4c04887e27e4076c5c28f8" exitCode=0 Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.561570 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-xl6z4" event={"ID":"6dad2940-a273-4f20-90d6-92278f45e92e","Type":"ContainerDied","Data":"c3d8f1f1aad1dd8d9445060302319a5bd2edb7b74f4c04887e27e4076c5c28f8"} Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.570375 4910 generic.go:334] "Generic (PLEG): container finished" podID="5129c20f-7129-4dde-b59e-3d2d291c87c1" containerID="99653dd93531799ba7907205bd0f49ec9509e31c306740ccb50671ea23797c38" exitCode=0 Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.570439 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-2c36-account-create-update-x4kcc" event={"ID":"5129c20f-7129-4dde-b59e-3d2d291c87c1","Type":"ContainerDied","Data":"99653dd93531799ba7907205bd0f49ec9509e31c306740ccb50671ea23797c38"} Jan 05 22:10:19 crc kubenswrapper[4910]: I0105 22:10:19.570743 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-2c36-account-create-update-x4kcc" event={"ID":"5129c20f-7129-4dde-b59e-3d2d291c87c1","Type":"ContainerStarted","Data":"5e001fc3d3c419fedf795aa3282ad807353857c6366f85f8aa0c2746de311de0"} Jan 05 22:10:20 crc kubenswrapper[4910]: I0105 22:10:20.594820 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"e2ab8a8678a38130f2659e63954e48baae4462647b6604c3ae9b246a148b5a0e"} Jan 05 22:10:20 crc kubenswrapper[4910]: I0105 22:10:20.595237 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"0cd732b1f2842a6991bdbdcfda901598d9442e11547c32597338a8fa53a2b375"} Jan 05 22:10:20 crc kubenswrapper[4910]: I0105 22:10:20.595253 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"f2769fedd4f026dd164121600d29619d9faa807462e36ef2df370d00a00de88f"} Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.348722 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ckwvh" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.352924 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-xl6z4" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.364569 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-8d3f-account-create-update-8rkt2" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.366435 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-9c2dv" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.372140 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-x9vtx" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.383402 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b71-account-create-update-5bc47" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.389701 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-2c36-account-create-update-x4kcc" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.452178 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0284aa1c-869a-43b1-9984-488eaed6ba0b-operator-scripts\") pod \"0284aa1c-869a-43b1-9984-488eaed6ba0b\" (UID: \"0284aa1c-869a-43b1-9984-488eaed6ba0b\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.452245 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvn2r\" (UniqueName: \"kubernetes.io/projected/6dad2940-a273-4f20-90d6-92278f45e92e-kube-api-access-zvn2r\") pod \"6dad2940-a273-4f20-90d6-92278f45e92e\" (UID: \"6dad2940-a273-4f20-90d6-92278f45e92e\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.452297 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxgk4\" (UniqueName: \"kubernetes.io/projected/0284aa1c-869a-43b1-9984-488eaed6ba0b-kube-api-access-xxgk4\") pod \"0284aa1c-869a-43b1-9984-488eaed6ba0b\" (UID: \"0284aa1c-869a-43b1-9984-488eaed6ba0b\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.453323 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0284aa1c-869a-43b1-9984-488eaed6ba0b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0284aa1c-869a-43b1-9984-488eaed6ba0b" (UID: "0284aa1c-869a-43b1-9984-488eaed6ba0b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.453652 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6dad2940-a273-4f20-90d6-92278f45e92e-operator-scripts\") pod \"6dad2940-a273-4f20-90d6-92278f45e92e\" (UID: \"6dad2940-a273-4f20-90d6-92278f45e92e\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.454282 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6dad2940-a273-4f20-90d6-92278f45e92e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6dad2940-a273-4f20-90d6-92278f45e92e" (UID: "6dad2940-a273-4f20-90d6-92278f45e92e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.455921 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0284aa1c-869a-43b1-9984-488eaed6ba0b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.455974 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6dad2940-a273-4f20-90d6-92278f45e92e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.484360 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0284aa1c-869a-43b1-9984-488eaed6ba0b-kube-api-access-xxgk4" (OuterVolumeSpecName: "kube-api-access-xxgk4") pod "0284aa1c-869a-43b1-9984-488eaed6ba0b" (UID: "0284aa1c-869a-43b1-9984-488eaed6ba0b"). InnerVolumeSpecName "kube-api-access-xxgk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.491575 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6dad2940-a273-4f20-90d6-92278f45e92e-kube-api-access-zvn2r" (OuterVolumeSpecName: "kube-api-access-zvn2r") pod "6dad2940-a273-4f20-90d6-92278f45e92e" (UID: "6dad2940-a273-4f20-90d6-92278f45e92e"). InnerVolumeSpecName "kube-api-access-zvn2r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.558752 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ea2fcf34-e416-43a2-a488-a9e952d19b81-operator-scripts\") pod \"ea2fcf34-e416-43a2-a488-a9e952d19b81\" (UID: \"ea2fcf34-e416-43a2-a488-a9e952d19b81\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.558837 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84wrj\" (UniqueName: \"kubernetes.io/projected/5129c20f-7129-4dde-b59e-3d2d291c87c1-kube-api-access-84wrj\") pod \"5129c20f-7129-4dde-b59e-3d2d291c87c1\" (UID: \"5129c20f-7129-4dde-b59e-3d2d291c87c1\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.558861 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5129c20f-7129-4dde-b59e-3d2d291c87c1-operator-scripts\") pod \"5129c20f-7129-4dde-b59e-3d2d291c87c1\" (UID: \"5129c20f-7129-4dde-b59e-3d2d291c87c1\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.558888 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xssrs\" (UniqueName: \"kubernetes.io/projected/ea2fcf34-e416-43a2-a488-a9e952d19b81-kube-api-access-xssrs\") pod \"ea2fcf34-e416-43a2-a488-a9e952d19b81\" (UID: \"ea2fcf34-e416-43a2-a488-a9e952d19b81\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.558914 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjwqw\" (UniqueName: \"kubernetes.io/projected/910bd239-dc3a-47a1-9a25-5e21046f9725-kube-api-access-jjwqw\") pod \"910bd239-dc3a-47a1-9a25-5e21046f9725\" (UID: \"910bd239-dc3a-47a1-9a25-5e21046f9725\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.558975 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46c3eb83-1eeb-4c44-b474-73e50c5afd6e-operator-scripts\") pod \"46c3eb83-1eeb-4c44-b474-73e50c5afd6e\" (UID: \"46c3eb83-1eeb-4c44-b474-73e50c5afd6e\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.559029 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t5hgv\" (UniqueName: \"kubernetes.io/projected/6d9a1e15-a5b4-46bd-89d0-92c58b63c416-kube-api-access-t5hgv\") pod \"6d9a1e15-a5b4-46bd-89d0-92c58b63c416\" (UID: \"6d9a1e15-a5b4-46bd-89d0-92c58b63c416\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.559086 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9a1e15-a5b4-46bd-89d0-92c58b63c416-operator-scripts\") pod \"6d9a1e15-a5b4-46bd-89d0-92c58b63c416\" (UID: \"6d9a1e15-a5b4-46bd-89d0-92c58b63c416\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.559114 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/910bd239-dc3a-47a1-9a25-5e21046f9725-operator-scripts\") pod \"910bd239-dc3a-47a1-9a25-5e21046f9725\" (UID: \"910bd239-dc3a-47a1-9a25-5e21046f9725\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.559152 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8h9x2\" (UniqueName: 
\"kubernetes.io/projected/46c3eb83-1eeb-4c44-b474-73e50c5afd6e-kube-api-access-8h9x2\") pod \"46c3eb83-1eeb-4c44-b474-73e50c5afd6e\" (UID: \"46c3eb83-1eeb-4c44-b474-73e50c5afd6e\") " Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.559266 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea2fcf34-e416-43a2-a488-a9e952d19b81-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ea2fcf34-e416-43a2-a488-a9e952d19b81" (UID: "ea2fcf34-e416-43a2-a488-a9e952d19b81"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.559299 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5129c20f-7129-4dde-b59e-3d2d291c87c1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5129c20f-7129-4dde-b59e-3d2d291c87c1" (UID: "5129c20f-7129-4dde-b59e-3d2d291c87c1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.559536 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46c3eb83-1eeb-4c44-b474-73e50c5afd6e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "46c3eb83-1eeb-4c44-b474-73e50c5afd6e" (UID: "46c3eb83-1eeb-4c44-b474-73e50c5afd6e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.559582 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvn2r\" (UniqueName: \"kubernetes.io/projected/6dad2940-a273-4f20-90d6-92278f45e92e-kube-api-access-zvn2r\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.559597 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxgk4\" (UniqueName: \"kubernetes.io/projected/0284aa1c-869a-43b1-9984-488eaed6ba0b-kube-api-access-xxgk4\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.559606 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ea2fcf34-e416-43a2-a488-a9e952d19b81-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.559614 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5129c20f-7129-4dde-b59e-3d2d291c87c1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.560059 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d9a1e15-a5b4-46bd-89d0-92c58b63c416-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6d9a1e15-a5b4-46bd-89d0-92c58b63c416" (UID: "6d9a1e15-a5b4-46bd-89d0-92c58b63c416"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.560487 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/910bd239-dc3a-47a1-9a25-5e21046f9725-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "910bd239-dc3a-47a1-9a25-5e21046f9725" (UID: "910bd239-dc3a-47a1-9a25-5e21046f9725"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.562678 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea2fcf34-e416-43a2-a488-a9e952d19b81-kube-api-access-xssrs" (OuterVolumeSpecName: "kube-api-access-xssrs") pod "ea2fcf34-e416-43a2-a488-a9e952d19b81" (UID: "ea2fcf34-e416-43a2-a488-a9e952d19b81"). InnerVolumeSpecName "kube-api-access-xssrs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.563483 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46c3eb83-1eeb-4c44-b474-73e50c5afd6e-kube-api-access-8h9x2" (OuterVolumeSpecName: "kube-api-access-8h9x2") pod "46c3eb83-1eeb-4c44-b474-73e50c5afd6e" (UID: "46c3eb83-1eeb-4c44-b474-73e50c5afd6e"). InnerVolumeSpecName "kube-api-access-8h9x2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.568250 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d9a1e15-a5b4-46bd-89d0-92c58b63c416-kube-api-access-t5hgv" (OuterVolumeSpecName: "kube-api-access-t5hgv") pod "6d9a1e15-a5b4-46bd-89d0-92c58b63c416" (UID: "6d9a1e15-a5b4-46bd-89d0-92c58b63c416"). InnerVolumeSpecName "kube-api-access-t5hgv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.568391 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/910bd239-dc3a-47a1-9a25-5e21046f9725-kube-api-access-jjwqw" (OuterVolumeSpecName: "kube-api-access-jjwqw") pod "910bd239-dc3a-47a1-9a25-5e21046f9725" (UID: "910bd239-dc3a-47a1-9a25-5e21046f9725"). InnerVolumeSpecName "kube-api-access-jjwqw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.580034 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5129c20f-7129-4dde-b59e-3d2d291c87c1-kube-api-access-84wrj" (OuterVolumeSpecName: "kube-api-access-84wrj") pod "5129c20f-7129-4dde-b59e-3d2d291c87c1" (UID: "5129c20f-7129-4dde-b59e-3d2d291c87c1"). InnerVolumeSpecName "kube-api-access-84wrj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.612478 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-xl6z4" event={"ID":"6dad2940-a273-4f20-90d6-92278f45e92e","Type":"ContainerDied","Data":"389dfae3bc984652e49bea888326a6619b7a3288e94ff3d91f60dd7bab756a1d"} Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.612531 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="389dfae3bc984652e49bea888326a6619b7a3288e94ff3d91f60dd7bab756a1d" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.612561 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-xl6z4" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.614359 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ckwvh" event={"ID":"0284aa1c-869a-43b1-9984-488eaed6ba0b","Type":"ContainerDied","Data":"411d843e3e4478758a3efc9c74fbd32918b56c14b6b02c260fa84ebb89b093b1"} Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.614405 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="411d843e3e4478758a3efc9c74fbd32918b56c14b6b02c260fa84ebb89b093b1" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.614573 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ckwvh" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.615955 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5b71-account-create-update-5bc47" event={"ID":"ea2fcf34-e416-43a2-a488-a9e952d19b81","Type":"ContainerDied","Data":"29a8bfd2df3b4c9d31f40a265bac78979d6eea42d86fa09c08f78a180b42f497"} Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.615977 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b71-account-create-update-5bc47" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.615988 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="29a8bfd2df3b4c9d31f40a265bac78979d6eea42d86fa09c08f78a180b42f497" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.617704 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-x9vtx" event={"ID":"6d9a1e15-a5b4-46bd-89d0-92c58b63c416","Type":"ContainerDied","Data":"7e2b54417fbf80f319ca721eb5496d4f03d93a913a61da4c8ac1e7ee55b073b5"} Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.617732 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e2b54417fbf80f319ca721eb5496d4f03d93a913a61da4c8ac1e7ee55b073b5" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.617737 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-x9vtx" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.619156 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8d3f-account-create-update-8rkt2" event={"ID":"46c3eb83-1eeb-4c44-b474-73e50c5afd6e","Type":"ContainerDied","Data":"209dfa34df38a4d3156ed7342b8be52ace2d9dc012a13b7fe2943a10ddc1aeb7"} Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.619180 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="209dfa34df38a4d3156ed7342b8be52ace2d9dc012a13b7fe2943a10ddc1aeb7" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.619252 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-8d3f-account-create-update-8rkt2" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.625390 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-9c2dv" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.625412 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-9c2dv" event={"ID":"910bd239-dc3a-47a1-9a25-5e21046f9725","Type":"ContainerDied","Data":"f7898c66ed849b43946cdda47f8cea1b9b69bb53f5a4e9fad076f375d7c65933"} Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.625444 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7898c66ed849b43946cdda47f8cea1b9b69bb53f5a4e9fad076f375d7c65933" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.627987 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-2c36-account-create-update-x4kcc" event={"ID":"5129c20f-7129-4dde-b59e-3d2d291c87c1","Type":"ContainerDied","Data":"5e001fc3d3c419fedf795aa3282ad807353857c6366f85f8aa0c2746de311de0"} Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.628321 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e001fc3d3c419fedf795aa3282ad807353857c6366f85f8aa0c2746de311de0" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.628385 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-2c36-account-create-update-x4kcc" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.665597 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t5hgv\" (UniqueName: \"kubernetes.io/projected/6d9a1e15-a5b4-46bd-89d0-92c58b63c416-kube-api-access-t5hgv\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.665678 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9a1e15-a5b4-46bd-89d0-92c58b63c416-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.665693 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/910bd239-dc3a-47a1-9a25-5e21046f9725-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.665706 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8h9x2\" (UniqueName: \"kubernetes.io/projected/46c3eb83-1eeb-4c44-b474-73e50c5afd6e-kube-api-access-8h9x2\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.665994 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84wrj\" (UniqueName: \"kubernetes.io/projected/5129c20f-7129-4dde-b59e-3d2d291c87c1-kube-api-access-84wrj\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.666012 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xssrs\" (UniqueName: \"kubernetes.io/projected/ea2fcf34-e416-43a2-a488-a9e952d19b81-kube-api-access-xssrs\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.666027 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjwqw\" (UniqueName: \"kubernetes.io/projected/910bd239-dc3a-47a1-9a25-5e21046f9725-kube-api-access-jjwqw\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.666042 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46c3eb83-1eeb-4c44-b474-73e50c5afd6e-operator-scripts\") on node \"crc\" DevicePath 
\"\"" Jan 05 22:10:22 crc kubenswrapper[4910]: I0105 22:10:22.940038 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 05 22:10:23 crc kubenswrapper[4910]: I0105 22:10:23.460611 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-cfp97" podUID="9253fb1e-9dce-4e54-80ee-fba5e3152596" containerName="ovn-controller" probeResult="failure" output=< Jan 05 22:10:23 crc kubenswrapper[4910]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 05 22:10:23 crc kubenswrapper[4910]: > Jan 05 22:10:24 crc kubenswrapper[4910]: I0105 22:10:24.406250 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="7e2a3efd-2de7-493e-af91-900b224e5313" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.100:5671: connect: connection refused" Jan 05 22:10:24 crc kubenswrapper[4910]: I0105 22:10:24.648931 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"2129598e625a213cd3ba79ba7fbea1e290f821367d76513b2babda344dd0d56d"} Jan 05 22:10:24 crc kubenswrapper[4910]: I0105 22:10:24.780476 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="b9cedfb5-8c45-434f-b04d-694bf6d600b8" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused" Jan 05 22:10:26 crc kubenswrapper[4910]: I0105 22:10:26.352208 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-xl6z4"] Jan 05 22:10:26 crc kubenswrapper[4910]: I0105 22:10:26.358821 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-xl6z4"] Jan 05 22:10:26 crc kubenswrapper[4910]: I0105 22:10:26.667043 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"187beb52e9b46c05114bf6a7d8a6f124abb0c3eca374625c1e25c808968452b8"} Jan 05 22:10:26 crc kubenswrapper[4910]: I0105 22:10:26.667093 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"030ff0b9ff130c75ae6701e006d9210557f59ef3c03a7bb98a7ca430e97109c9"} Jan 05 22:10:26 crc kubenswrapper[4910]: I0105 22:10:26.667104 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"68e3c9997f2af7b46038842848451d18323b773c525c082d57ad2f0fb30df5ed"} Jan 05 22:10:26 crc kubenswrapper[4910]: I0105 22:10:26.667112 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"ab943cdf655ff0be681e72dca8f34c8ac3fb8d5e0e2a1b8ed872d453cb2ea0d6"} Jan 05 22:10:26 crc kubenswrapper[4910]: I0105 22:10:26.732065 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6dad2940-a273-4f20-90d6-92278f45e92e" path="/var/lib/kubelet/pods/6dad2940-a273-4f20-90d6-92278f45e92e/volumes" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.864756 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-75wjt"] Jan 05 22:10:27 crc kubenswrapper[4910]: E0105 22:10:27.865678 
4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5129c20f-7129-4dde-b59e-3d2d291c87c1" containerName="mariadb-account-create-update" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.865696 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5129c20f-7129-4dde-b59e-3d2d291c87c1" containerName="mariadb-account-create-update" Jan 05 22:10:27 crc kubenswrapper[4910]: E0105 22:10:27.865712 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="910bd239-dc3a-47a1-9a25-5e21046f9725" containerName="mariadb-database-create" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.865719 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="910bd239-dc3a-47a1-9a25-5e21046f9725" containerName="mariadb-database-create" Jan 05 22:10:27 crc kubenswrapper[4910]: E0105 22:10:27.865737 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea2fcf34-e416-43a2-a488-a9e952d19b81" containerName="mariadb-account-create-update" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.865744 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea2fcf34-e416-43a2-a488-a9e952d19b81" containerName="mariadb-account-create-update" Jan 05 22:10:27 crc kubenswrapper[4910]: E0105 22:10:27.865758 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dad2940-a273-4f20-90d6-92278f45e92e" containerName="mariadb-account-create-update" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.865765 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dad2940-a273-4f20-90d6-92278f45e92e" containerName="mariadb-account-create-update" Jan 05 22:10:27 crc kubenswrapper[4910]: E0105 22:10:27.865781 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3eb83-1eeb-4c44-b474-73e50c5afd6e" containerName="mariadb-account-create-update" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.865788 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3eb83-1eeb-4c44-b474-73e50c5afd6e" containerName="mariadb-account-create-update" Jan 05 22:10:27 crc kubenswrapper[4910]: E0105 22:10:27.866012 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d9a1e15-a5b4-46bd-89d0-92c58b63c416" containerName="mariadb-database-create" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.866023 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d9a1e15-a5b4-46bd-89d0-92c58b63c416" containerName="mariadb-database-create" Jan 05 22:10:27 crc kubenswrapper[4910]: E0105 22:10:27.866035 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0284aa1c-869a-43b1-9984-488eaed6ba0b" containerName="mariadb-database-create" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.866041 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0284aa1c-869a-43b1-9984-488eaed6ba0b" containerName="mariadb-database-create" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.866243 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6dad2940-a273-4f20-90d6-92278f45e92e" containerName="mariadb-account-create-update" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.866263 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3eb83-1eeb-4c44-b474-73e50c5afd6e" containerName="mariadb-account-create-update" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.866278 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5129c20f-7129-4dde-b59e-3d2d291c87c1" containerName="mariadb-account-create-update" Jan 05 22:10:27 crc 
kubenswrapper[4910]: I0105 22:10:27.866289 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea2fcf34-e416-43a2-a488-a9e952d19b81" containerName="mariadb-account-create-update" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.866297 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d9a1e15-a5b4-46bd-89d0-92c58b63c416" containerName="mariadb-database-create" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.866313 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="0284aa1c-869a-43b1-9984-488eaed6ba0b" containerName="mariadb-database-create" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.866321 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="910bd239-dc3a-47a1-9a25-5e21046f9725" containerName="mariadb-database-create" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.866892 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.870070 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-fks6q" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.870364 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.877677 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-75wjt"] Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.982958 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kxkn\" (UniqueName: \"kubernetes.io/projected/569058f0-d9dd-45de-a0ce-dd38bb6ce341-kube-api-access-2kxkn\") pod \"glance-db-sync-75wjt\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") " pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.983050 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-db-sync-config-data\") pod \"glance-db-sync-75wjt\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") " pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.983120 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-config-data\") pod \"glance-db-sync-75wjt\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") " pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:27 crc kubenswrapper[4910]: I0105 22:10:27.983309 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-combined-ca-bundle\") pod \"glance-db-sync-75wjt\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") " pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.085425 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-combined-ca-bundle\") pod \"glance-db-sync-75wjt\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") " pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.085638 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kxkn\" (UniqueName: \"kubernetes.io/projected/569058f0-d9dd-45de-a0ce-dd38bb6ce341-kube-api-access-2kxkn\") pod \"glance-db-sync-75wjt\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") " pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.085718 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-db-sync-config-data\") pod \"glance-db-sync-75wjt\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") " pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.085760 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-config-data\") pod \"glance-db-sync-75wjt\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") " pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.107556 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-config-data\") pod \"glance-db-sync-75wjt\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") " pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.110919 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-db-sync-config-data\") pod \"glance-db-sync-75wjt\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") " pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.112935 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-combined-ca-bundle\") pod \"glance-db-sync-75wjt\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") " pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.127983 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kxkn\" (UniqueName: \"kubernetes.io/projected/569058f0-d9dd-45de-a0ce-dd38bb6ce341-kube-api-access-2kxkn\") pod \"glance-db-sync-75wjt\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") " pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.197759 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-75wjt" Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.449449 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-cfp97" podUID="9253fb1e-9dce-4e54-80ee-fba5e3152596" containerName="ovn-controller" probeResult="failure" output=< Jan 05 22:10:28 crc kubenswrapper[4910]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 05 22:10:28 crc kubenswrapper[4910]: > Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.548937 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.570624 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.974285 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-cfp97-config-pgzvd"] Jan 05 22:10:28 crc kubenswrapper[4910]: I0105 22:10:28.975952 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.004256 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.006990 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-cfp97-config-pgzvd"] Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.114224 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jkfq\" (UniqueName: \"kubernetes.io/projected/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-kube-api-access-9jkfq\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.114481 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-log-ovn\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.114573 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-run\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.114784 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-run-ovn\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.114835 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-scripts\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: 
\"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.114893 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-additional-scripts\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.216512 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-run\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.216998 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-run-ovn\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.217028 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-scripts\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.217077 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-additional-scripts\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.217183 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jkfq\" (UniqueName: \"kubernetes.io/projected/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-kube-api-access-9jkfq\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.217220 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-log-ovn\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.217210 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-run\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.217691 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-run-ovn\") pod 
\"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.218048 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-log-ovn\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.218109 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-additional-scripts\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.219415 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-scripts\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.248130 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jkfq\" (UniqueName: \"kubernetes.io/projected/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-kube-api-access-9jkfq\") pod \"ovn-controller-cfp97-config-pgzvd\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.308316 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-75wjt"] Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.328860 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.719374 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"5739ed0c5ca6cc3d18fd000f35d28e3755ecb57c1feed925bbcdda9a4d46f763"} Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.720415 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"5fa5b197746f4fc6c232971216c9a644e9ab975e961e0c935229cf38a4e633b6"} Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.720605 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"1614de421c052452069aee80467540af7a4813e1f57aea4bdd99541595f16624"} Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.721578 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-75wjt" event={"ID":"569058f0-d9dd-45de-a0ce-dd38bb6ce341","Type":"ContainerStarted","Data":"e1a5d79bebceff73833872a9b2bf9fbbf2fd0129b593572bba9ee308f6fc3b5b"} Jan 05 22:10:29 crc kubenswrapper[4910]: W0105 22:10:29.903510 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddcb86919_a2a1_4563_a4c1_ad2c6c7efdf6.slice/crio-5909cb0afb9290b7aa52f96eaa61c0bc16442abb5f191d11efed7a4d04d9930a WatchSource:0}: Error finding container 5909cb0afb9290b7aa52f96eaa61c0bc16442abb5f191d11efed7a4d04d9930a: Status 404 returned error can't find the container with id 5909cb0afb9290b7aa52f96eaa61c0bc16442abb5f191d11efed7a4d04d9930a Jan 05 22:10:29 crc kubenswrapper[4910]: I0105 22:10:29.910348 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-cfp97-config-pgzvd"] Jan 05 22:10:30 crc kubenswrapper[4910]: I0105 22:10:30.743370 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"9b00f43f9dea3110ed5f648eaaad264722a104ce4177678f5dfd3b49816ef94f"} Jan 05 22:10:30 crc kubenswrapper[4910]: I0105 22:10:30.744101 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"16a4b970b359fbc6fc563656363aa95e36f86df606367a79bbe2212753463870"} Jan 05 22:10:30 crc kubenswrapper[4910]: I0105 22:10:30.744147 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"831fd24da04e59f5338c337a32590b1382ff92df1e292249c377b504749c88e0"} Jan 05 22:10:30 crc kubenswrapper[4910]: I0105 22:10:30.744158 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerStarted","Data":"003b527dd2c8b643268b3cff916e9f4b6fbe1f8126957b42aa16a3434a320025"} Jan 05 22:10:30 crc kubenswrapper[4910]: I0105 22:10:30.748971 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cfp97-config-pgzvd" event={"ID":"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6","Type":"ContainerStarted","Data":"205d4b46e7bd413f6db4358dc1c3d1c4cb04c634807f6f47aed239d5fff6aad0"} Jan 05 22:10:30 crc 
kubenswrapper[4910]: I0105 22:10:30.749017 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cfp97-config-pgzvd" event={"ID":"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6","Type":"ContainerStarted","Data":"5909cb0afb9290b7aa52f96eaa61c0bc16442abb5f191d11efed7a4d04d9930a"} Jan 05 22:10:30 crc kubenswrapper[4910]: I0105 22:10:30.784881 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=20.596253055 podStartE2EDuration="31.784860339s" podCreationTimestamp="2026-01-05 22:09:59 +0000 UTC" firstStartedPulling="2026-01-05 22:10:17.520700015 +0000 UTC m=+1149.098197685" lastFinishedPulling="2026-01-05 22:10:28.709307299 +0000 UTC m=+1160.286804969" observedRunningTime="2026-01-05 22:10:30.775563884 +0000 UTC m=+1162.353061554" watchObservedRunningTime="2026-01-05 22:10:30.784860339 +0000 UTC m=+1162.362358009" Jan 05 22:10:30 crc kubenswrapper[4910]: I0105 22:10:30.810278 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-cfp97-config-pgzvd" podStartSLOduration=2.810256912 podStartE2EDuration="2.810256912s" podCreationTimestamp="2026-01-05 22:10:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:10:30.806663645 +0000 UTC m=+1162.384161315" watchObservedRunningTime="2026-01-05 22:10:30.810256912 +0000 UTC m=+1162.387754582" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.056746 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75bdffd66f-fxgpr"] Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.058057 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.060079 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.081963 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75bdffd66f-fxgpr"] Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.157175 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-ovsdbserver-nb\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.157231 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-config\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.157276 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-ovsdbserver-sb\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.157513 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-dns-svc\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.157781 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-dns-swift-storage-0\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.157969 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkdlg\" (UniqueName: \"kubernetes.io/projected/853c3a83-badd-474e-b356-5034158f9450-kube-api-access-kkdlg\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.266693 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-dns-svc\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.266950 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-dns-swift-storage-0\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.267114 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkdlg\" (UniqueName: \"kubernetes.io/projected/853c3a83-badd-474e-b356-5034158f9450-kube-api-access-kkdlg\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.267343 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-ovsdbserver-nb\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.267383 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-config\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.267506 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-ovsdbserver-sb\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.268097 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-ovsdbserver-nb\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.268103 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-dns-svc\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.268363 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-ovsdbserver-sb\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.268633 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-config\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.268737 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-dns-swift-storage-0\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.295431 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkdlg\" (UniqueName: \"kubernetes.io/projected/853c3a83-badd-474e-b356-5034158f9450-kube-api-access-kkdlg\") pod \"dnsmasq-dns-75bdffd66f-fxgpr\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.355044 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-zzbhs"] Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.356446 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zzbhs" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.359185 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.367702 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zzbhs"] Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.388830 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.471853 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ea248b17-90e3-464f-803b-a95ec269a2ad-operator-scripts\") pod \"root-account-create-update-zzbhs\" (UID: \"ea248b17-90e3-464f-803b-a95ec269a2ad\") " pod="openstack/root-account-create-update-zzbhs" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.471920 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ll4nk\" (UniqueName: \"kubernetes.io/projected/ea248b17-90e3-464f-803b-a95ec269a2ad-kube-api-access-ll4nk\") pod \"root-account-create-update-zzbhs\" (UID: \"ea248b17-90e3-464f-803b-a95ec269a2ad\") " pod="openstack/root-account-create-update-zzbhs" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.573430 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ea248b17-90e3-464f-803b-a95ec269a2ad-operator-scripts\") pod \"root-account-create-update-zzbhs\" (UID: \"ea248b17-90e3-464f-803b-a95ec269a2ad\") " pod="openstack/root-account-create-update-zzbhs" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.573478 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ll4nk\" (UniqueName: \"kubernetes.io/projected/ea248b17-90e3-464f-803b-a95ec269a2ad-kube-api-access-ll4nk\") pod \"root-account-create-update-zzbhs\" (UID: \"ea248b17-90e3-464f-803b-a95ec269a2ad\") " pod="openstack/root-account-create-update-zzbhs" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.575447 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ea248b17-90e3-464f-803b-a95ec269a2ad-operator-scripts\") pod \"root-account-create-update-zzbhs\" (UID: \"ea248b17-90e3-464f-803b-a95ec269a2ad\") " pod="openstack/root-account-create-update-zzbhs" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.609650 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ll4nk\" (UniqueName: \"kubernetes.io/projected/ea248b17-90e3-464f-803b-a95ec269a2ad-kube-api-access-ll4nk\") pod \"root-account-create-update-zzbhs\" (UID: \"ea248b17-90e3-464f-803b-a95ec269a2ad\") " pod="openstack/root-account-create-update-zzbhs" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.681722 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-zzbhs" Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.762073 4910 generic.go:334] "Generic (PLEG): container finished" podID="dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6" containerID="205d4b46e7bd413f6db4358dc1c3d1c4cb04c634807f6f47aed239d5fff6aad0" exitCode=0 Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.763417 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cfp97-config-pgzvd" event={"ID":"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6","Type":"ContainerDied","Data":"205d4b46e7bd413f6db4358dc1c3d1c4cb04c634807f6f47aed239d5fff6aad0"} Jan 05 22:10:31 crc kubenswrapper[4910]: I0105 22:10:31.930802 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75bdffd66f-fxgpr"] Jan 05 22:10:31 crc kubenswrapper[4910]: W0105 22:10:31.933899 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod853c3a83_badd_474e_b356_5034158f9450.slice/crio-c4160d50beed7062923fa7e16ab70e929ae39e2f667380d6a560d16e5ccdb0d1 WatchSource:0}: Error finding container c4160d50beed7062923fa7e16ab70e929ae39e2f667380d6a560d16e5ccdb0d1: Status 404 returned error can't find the container with id c4160d50beed7062923fa7e16ab70e929ae39e2f667380d6a560d16e5ccdb0d1 Jan 05 22:10:32 crc kubenswrapper[4910]: I0105 22:10:32.164783 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zzbhs"] Jan 05 22:10:32 crc kubenswrapper[4910]: W0105 22:10:32.177005 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea248b17_90e3_464f_803b_a95ec269a2ad.slice/crio-585eebfe2fc9855d8a635b6e051058eb10863cd5e52f6aa2d469bf8c2226a491 WatchSource:0}: Error finding container 585eebfe2fc9855d8a635b6e051058eb10863cd5e52f6aa2d469bf8c2226a491: Status 404 returned error can't find the container with id 585eebfe2fc9855d8a635b6e051058eb10863cd5e52f6aa2d469bf8c2226a491 Jan 05 22:10:32 crc kubenswrapper[4910]: I0105 22:10:32.777173 4910 generic.go:334] "Generic (PLEG): container finished" podID="853c3a83-badd-474e-b356-5034158f9450" containerID="bfc5218769d16b681f150c0c77305c6aec81a9a558d8bbba578d9bb05efbd380" exitCode=0 Jan 05 22:10:32 crc kubenswrapper[4910]: I0105 22:10:32.777396 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" event={"ID":"853c3a83-badd-474e-b356-5034158f9450","Type":"ContainerDied","Data":"bfc5218769d16b681f150c0c77305c6aec81a9a558d8bbba578d9bb05efbd380"} Jan 05 22:10:32 crc kubenswrapper[4910]: I0105 22:10:32.777568 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" event={"ID":"853c3a83-badd-474e-b356-5034158f9450","Type":"ContainerStarted","Data":"c4160d50beed7062923fa7e16ab70e929ae39e2f667380d6a560d16e5ccdb0d1"} Jan 05 22:10:32 crc kubenswrapper[4910]: I0105 22:10:32.782660 4910 generic.go:334] "Generic (PLEG): container finished" podID="ea248b17-90e3-464f-803b-a95ec269a2ad" containerID="1bd09319b9d11bdb43c8058ccf667b97b6c47dd45db7f7355efe7383386b8570" exitCode=0 Jan 05 22:10:32 crc kubenswrapper[4910]: I0105 22:10:32.782859 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zzbhs" event={"ID":"ea248b17-90e3-464f-803b-a95ec269a2ad","Type":"ContainerDied","Data":"1bd09319b9d11bdb43c8058ccf667b97b6c47dd45db7f7355efe7383386b8570"} Jan 05 22:10:32 crc 
kubenswrapper[4910]: I0105 22:10:32.782880 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zzbhs" event={"ID":"ea248b17-90e3-464f-803b-a95ec269a2ad","Type":"ContainerStarted","Data":"585eebfe2fc9855d8a635b6e051058eb10863cd5e52f6aa2d469bf8c2226a491"} Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.157918 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.245037 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-run\") pod \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.245098 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jkfq\" (UniqueName: \"kubernetes.io/projected/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-kube-api-access-9jkfq\") pod \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.245183 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-log-ovn\") pod \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.245224 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-run-ovn\") pod \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.245236 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-run" (OuterVolumeSpecName: "var-run") pod "dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6" (UID: "dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.245316 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-additional-scripts\") pod \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.245323 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6" (UID: "dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.245383 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6" (UID: "dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.245427 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-scripts\") pod \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\" (UID: \"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6\") " Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.245792 4910 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-run\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.245807 4910 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.245816 4910 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.246538 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6" (UID: "dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.246663 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-scripts" (OuterVolumeSpecName: "scripts") pod "dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6" (UID: "dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.259681 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-kube-api-access-9jkfq" (OuterVolumeSpecName: "kube-api-access-9jkfq") pod "dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6" (UID: "dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6"). InnerVolumeSpecName "kube-api-access-9jkfq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.347504 4910 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.347541 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.347556 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jkfq\" (UniqueName: \"kubernetes.io/projected/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6-kube-api-access-9jkfq\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.467019 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-cfp97" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.794833 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-cfp97-config-pgzvd" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.794892 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cfp97-config-pgzvd" event={"ID":"dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6","Type":"ContainerDied","Data":"5909cb0afb9290b7aa52f96eaa61c0bc16442abb5f191d11efed7a4d04d9930a"} Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.794931 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5909cb0afb9290b7aa52f96eaa61c0bc16442abb5f191d11efed7a4d04d9930a" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.799570 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" event={"ID":"853c3a83-badd-474e-b356-5034158f9450","Type":"ContainerStarted","Data":"ad0ae345e35fac61af32e0cff6a542a82c46c5fb3aaedcc6e1de4b8d81337d80"} Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.799655 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:33 crc kubenswrapper[4910]: I0105 22:10:33.835877 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" podStartSLOduration=2.835829714 podStartE2EDuration="2.835829714s" podCreationTimestamp="2026-01-05 22:10:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:10:33.823773753 +0000 UTC m=+1165.401271433" watchObservedRunningTime="2026-01-05 22:10:33.835829714 +0000 UTC m=+1165.413327384" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.143312 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-zzbhs" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.261089 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-cfp97-config-pgzvd"] Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.269895 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-cfp97-config-pgzvd"] Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.302035 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ll4nk\" (UniqueName: \"kubernetes.io/projected/ea248b17-90e3-464f-803b-a95ec269a2ad-kube-api-access-ll4nk\") pod \"ea248b17-90e3-464f-803b-a95ec269a2ad\" (UID: \"ea248b17-90e3-464f-803b-a95ec269a2ad\") " Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.302155 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ea248b17-90e3-464f-803b-a95ec269a2ad-operator-scripts\") pod \"ea248b17-90e3-464f-803b-a95ec269a2ad\" (UID: \"ea248b17-90e3-464f-803b-a95ec269a2ad\") " Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.303232 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea248b17-90e3-464f-803b-a95ec269a2ad-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ea248b17-90e3-464f-803b-a95ec269a2ad" (UID: "ea248b17-90e3-464f-803b-a95ec269a2ad"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.312271 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea248b17-90e3-464f-803b-a95ec269a2ad-kube-api-access-ll4nk" (OuterVolumeSpecName: "kube-api-access-ll4nk") pod "ea248b17-90e3-464f-803b-a95ec269a2ad" (UID: "ea248b17-90e3-464f-803b-a95ec269a2ad"). InnerVolumeSpecName "kube-api-access-ll4nk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.410591 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ea248b17-90e3-464f-803b-a95ec269a2ad-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.410640 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ll4nk\" (UniqueName: \"kubernetes.io/projected/ea248b17-90e3-464f-803b-a95ec269a2ad-kube-api-access-ll4nk\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.412277 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.716017 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-fpldq"] Jan 05 22:10:34 crc kubenswrapper[4910]: E0105 22:10:34.716742 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea248b17-90e3-464f-803b-a95ec269a2ad" containerName="mariadb-account-create-update" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.716838 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea248b17-90e3-464f-803b-a95ec269a2ad" containerName="mariadb-account-create-update" Jan 05 22:10:34 crc kubenswrapper[4910]: E0105 22:10:34.716909 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6" containerName="ovn-config" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.716973 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6" containerName="ovn-config" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.717225 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea248b17-90e3-464f-803b-a95ec269a2ad" containerName="mariadb-account-create-update" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.717304 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6" containerName="ovn-config" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.717908 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-fpldq" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.747588 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6" path="/var/lib/kubelet/pods/dcb86919-a2a1-4563-a4c1-ad2c6c7efdf6/volumes" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.748399 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-fpldq"] Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.782292 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.811924 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zzbhs" event={"ID":"ea248b17-90e3-464f-803b-a95ec269a2ad","Type":"ContainerDied","Data":"585eebfe2fc9855d8a635b6e051058eb10863cd5e52f6aa2d469bf8c2226a491"} Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.812000 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="585eebfe2fc9855d8a635b6e051058eb10863cd5e52f6aa2d469bf8c2226a491" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.811958 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zzbhs" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.826580 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8a77378-db31-4715-b92e-2edc06b352a5-operator-scripts\") pod \"barbican-db-create-fpldq\" (UID: \"a8a77378-db31-4715-b92e-2edc06b352a5\") " pod="openstack/barbican-db-create-fpldq" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.826655 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfghq\" (UniqueName: \"kubernetes.io/projected/a8a77378-db31-4715-b92e-2edc06b352a5-kube-api-access-lfghq\") pod \"barbican-db-create-fpldq\" (UID: \"a8a77378-db31-4715-b92e-2edc06b352a5\") " pod="openstack/barbican-db-create-fpldq" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.923412 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-kjj9c"] Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.924518 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-kjj9c" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.928251 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8a77378-db31-4715-b92e-2edc06b352a5-operator-scripts\") pod \"barbican-db-create-fpldq\" (UID: \"a8a77378-db31-4715-b92e-2edc06b352a5\") " pod="openstack/barbican-db-create-fpldq" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.928342 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfghq\" (UniqueName: \"kubernetes.io/projected/a8a77378-db31-4715-b92e-2edc06b352a5-kube-api-access-lfghq\") pod \"barbican-db-create-fpldq\" (UID: \"a8a77378-db31-4715-b92e-2edc06b352a5\") " pod="openstack/barbican-db-create-fpldq" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.933287 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-kjj9c"] Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.933535 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8a77378-db31-4715-b92e-2edc06b352a5-operator-scripts\") pod \"barbican-db-create-fpldq\" (UID: \"a8a77378-db31-4715-b92e-2edc06b352a5\") " pod="openstack/barbican-db-create-fpldq" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.947138 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-1a42-account-create-update-4mgz8"] Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.948441 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1a42-account-create-update-4mgz8" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.950423 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 05 22:10:34 crc kubenswrapper[4910]: I0105 22:10:34.951277 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfghq\" (UniqueName: \"kubernetes.io/projected/a8a77378-db31-4715-b92e-2edc06b352a5-kube-api-access-lfghq\") pod \"barbican-db-create-fpldq\" (UID: \"a8a77378-db31-4715-b92e-2edc06b352a5\") " pod="openstack/barbican-db-create-fpldq" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.012689 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1a42-account-create-update-4mgz8"] Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.043380 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-fpldq" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.049111 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gclld\" (UniqueName: \"kubernetes.io/projected/2219fd99-b8c4-4918-8aa2-7f59a307dec5-kube-api-access-gclld\") pod \"cinder-db-create-kjj9c\" (UID: \"2219fd99-b8c4-4918-8aa2-7f59a307dec5\") " pod="openstack/cinder-db-create-kjj9c" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.049298 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf38198b-8f7d-4853-ba3d-e1968aa3a284-operator-scripts\") pod \"barbican-1a42-account-create-update-4mgz8\" (UID: \"bf38198b-8f7d-4853-ba3d-e1968aa3a284\") " pod="openstack/barbican-1a42-account-create-update-4mgz8" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.049457 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2219fd99-b8c4-4918-8aa2-7f59a307dec5-operator-scripts\") pod \"cinder-db-create-kjj9c\" (UID: \"2219fd99-b8c4-4918-8aa2-7f59a307dec5\") " pod="openstack/cinder-db-create-kjj9c" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.049590 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpqtj\" (UniqueName: \"kubernetes.io/projected/bf38198b-8f7d-4853-ba3d-e1968aa3a284-kube-api-access-kpqtj\") pod \"barbican-1a42-account-create-update-4mgz8\" (UID: \"bf38198b-8f7d-4853-ba3d-e1968aa3a284\") " pod="openstack/barbican-1a42-account-create-update-4mgz8" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.105136 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-524a-account-create-update-d5t9q"] Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.106205 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-524a-account-create-update-d5t9q" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.128979 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-524a-account-create-update-d5t9q"] Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.140539 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.154163 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2219fd99-b8c4-4918-8aa2-7f59a307dec5-operator-scripts\") pod \"cinder-db-create-kjj9c\" (UID: \"2219fd99-b8c4-4918-8aa2-7f59a307dec5\") " pod="openstack/cinder-db-create-kjj9c" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.154240 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpqtj\" (UniqueName: \"kubernetes.io/projected/bf38198b-8f7d-4853-ba3d-e1968aa3a284-kube-api-access-kpqtj\") pod \"barbican-1a42-account-create-update-4mgz8\" (UID: \"bf38198b-8f7d-4853-ba3d-e1968aa3a284\") " pod="openstack/barbican-1a42-account-create-update-4mgz8" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.154272 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gclld\" (UniqueName: \"kubernetes.io/projected/2219fd99-b8c4-4918-8aa2-7f59a307dec5-kube-api-access-gclld\") pod \"cinder-db-create-kjj9c\" (UID: \"2219fd99-b8c4-4918-8aa2-7f59a307dec5\") " pod="openstack/cinder-db-create-kjj9c" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.154350 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf38198b-8f7d-4853-ba3d-e1968aa3a284-operator-scripts\") pod \"barbican-1a42-account-create-update-4mgz8\" (UID: \"bf38198b-8f7d-4853-ba3d-e1968aa3a284\") " pod="openstack/barbican-1a42-account-create-update-4mgz8" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.155064 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf38198b-8f7d-4853-ba3d-e1968aa3a284-operator-scripts\") pod \"barbican-1a42-account-create-update-4mgz8\" (UID: \"bf38198b-8f7d-4853-ba3d-e1968aa3a284\") " pod="openstack/barbican-1a42-account-create-update-4mgz8" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.155627 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2219fd99-b8c4-4918-8aa2-7f59a307dec5-operator-scripts\") pod \"cinder-db-create-kjj9c\" (UID: \"2219fd99-b8c4-4918-8aa2-7f59a307dec5\") " pod="openstack/cinder-db-create-kjj9c" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.193703 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-mdgvq"] Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.198656 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-mdgvq" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.203463 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gclld\" (UniqueName: \"kubernetes.io/projected/2219fd99-b8c4-4918-8aa2-7f59a307dec5-kube-api-access-gclld\") pod \"cinder-db-create-kjj9c\" (UID: \"2219fd99-b8c4-4918-8aa2-7f59a307dec5\") " pod="openstack/cinder-db-create-kjj9c" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.211989 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpqtj\" (UniqueName: \"kubernetes.io/projected/bf38198b-8f7d-4853-ba3d-e1968aa3a284-kube-api-access-kpqtj\") pod \"barbican-1a42-account-create-update-4mgz8\" (UID: \"bf38198b-8f7d-4853-ba3d-e1968aa3a284\") " pod="openstack/barbican-1a42-account-create-update-4mgz8" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.231948 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-mdgvq"] Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.262223 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba-operator-scripts\") pod \"cinder-524a-account-create-update-d5t9q\" (UID: \"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba\") " pod="openstack/cinder-524a-account-create-update-d5t9q" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.262319 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vv5lm\" (UniqueName: \"kubernetes.io/projected/7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba-kube-api-access-vv5lm\") pod \"cinder-524a-account-create-update-d5t9q\" (UID: \"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba\") " pod="openstack/cinder-524a-account-create-update-d5t9q" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.307101 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-kjj9c" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.333151 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-0eae-account-create-update-pldpc"] Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.334595 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0eae-account-create-update-pldpc" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.339259 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.346460 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-0eae-account-create-update-pldpc"] Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.352258 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-1a42-account-create-update-4mgz8" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.366049 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f127b53b-cd48-48bd-b890-2dd47e1abd37-operator-scripts\") pod \"neutron-db-create-mdgvq\" (UID: \"f127b53b-cd48-48bd-b890-2dd47e1abd37\") " pod="openstack/neutron-db-create-mdgvq" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.366237 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hr8dn\" (UniqueName: \"kubernetes.io/projected/f127b53b-cd48-48bd-b890-2dd47e1abd37-kube-api-access-hr8dn\") pod \"neutron-db-create-mdgvq\" (UID: \"f127b53b-cd48-48bd-b890-2dd47e1abd37\") " pod="openstack/neutron-db-create-mdgvq" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.366475 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba-operator-scripts\") pod \"cinder-524a-account-create-update-d5t9q\" (UID: \"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba\") " pod="openstack/cinder-524a-account-create-update-d5t9q" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.366669 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vv5lm\" (UniqueName: \"kubernetes.io/projected/7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba-kube-api-access-vv5lm\") pod \"cinder-524a-account-create-update-d5t9q\" (UID: \"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba\") " pod="openstack/cinder-524a-account-create-update-d5t9q" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.367404 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba-operator-scripts\") pod \"cinder-524a-account-create-update-d5t9q\" (UID: \"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba\") " pod="openstack/cinder-524a-account-create-update-d5t9q" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.384561 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-wb5hf"] Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.412819 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-wb5hf" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.422443 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-t5hmz" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.422797 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.423008 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.434999 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vv5lm\" (UniqueName: \"kubernetes.io/projected/7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba-kube-api-access-vv5lm\") pod \"cinder-524a-account-create-update-d5t9q\" (UID: \"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba\") " pod="openstack/cinder-524a-account-create-update-d5t9q" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.450620 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.470484 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hr8dn\" (UniqueName: \"kubernetes.io/projected/f127b53b-cd48-48bd-b890-2dd47e1abd37-kube-api-access-hr8dn\") pod \"neutron-db-create-mdgvq\" (UID: \"f127b53b-cd48-48bd-b890-2dd47e1abd37\") " pod="openstack/neutron-db-create-mdgvq" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.470764 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/267d700c-88ab-4264-8ee9-cb3b02d10b23-operator-scripts\") pod \"neutron-0eae-account-create-update-pldpc\" (UID: \"267d700c-88ab-4264-8ee9-cb3b02d10b23\") " pod="openstack/neutron-0eae-account-create-update-pldpc" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.471006 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f127b53b-cd48-48bd-b890-2dd47e1abd37-operator-scripts\") pod \"neutron-db-create-mdgvq\" (UID: \"f127b53b-cd48-48bd-b890-2dd47e1abd37\") " pod="openstack/neutron-db-create-mdgvq" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.471444 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4zjn\" (UniqueName: \"kubernetes.io/projected/267d700c-88ab-4264-8ee9-cb3b02d10b23-kube-api-access-n4zjn\") pod \"neutron-0eae-account-create-update-pldpc\" (UID: \"267d700c-88ab-4264-8ee9-cb3b02d10b23\") " pod="openstack/neutron-0eae-account-create-update-pldpc" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.472970 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f127b53b-cd48-48bd-b890-2dd47e1abd37-operator-scripts\") pod \"neutron-db-create-mdgvq\" (UID: \"f127b53b-cd48-48bd-b890-2dd47e1abd37\") " pod="openstack/neutron-db-create-mdgvq" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.471228 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-524a-account-create-update-d5t9q" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.528834 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hr8dn\" (UniqueName: \"kubernetes.io/projected/f127b53b-cd48-48bd-b890-2dd47e1abd37-kube-api-access-hr8dn\") pod \"neutron-db-create-mdgvq\" (UID: \"f127b53b-cd48-48bd-b890-2dd47e1abd37\") " pod="openstack/neutron-db-create-mdgvq" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.547287 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-wb5hf"] Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.579431 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72da5def-cd27-4431-a7da-04b32457cdb1-combined-ca-bundle\") pod \"keystone-db-sync-wb5hf\" (UID: \"72da5def-cd27-4431-a7da-04b32457cdb1\") " pod="openstack/keystone-db-sync-wb5hf" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.579534 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxnhr\" (UniqueName: \"kubernetes.io/projected/72da5def-cd27-4431-a7da-04b32457cdb1-kube-api-access-cxnhr\") pod \"keystone-db-sync-wb5hf\" (UID: \"72da5def-cd27-4431-a7da-04b32457cdb1\") " pod="openstack/keystone-db-sync-wb5hf" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.579591 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4zjn\" (UniqueName: \"kubernetes.io/projected/267d700c-88ab-4264-8ee9-cb3b02d10b23-kube-api-access-n4zjn\") pod \"neutron-0eae-account-create-update-pldpc\" (UID: \"267d700c-88ab-4264-8ee9-cb3b02d10b23\") " pod="openstack/neutron-0eae-account-create-update-pldpc" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.579622 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/267d700c-88ab-4264-8ee9-cb3b02d10b23-operator-scripts\") pod \"neutron-0eae-account-create-update-pldpc\" (UID: \"267d700c-88ab-4264-8ee9-cb3b02d10b23\") " pod="openstack/neutron-0eae-account-create-update-pldpc" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.579645 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72da5def-cd27-4431-a7da-04b32457cdb1-config-data\") pod \"keystone-db-sync-wb5hf\" (UID: \"72da5def-cd27-4431-a7da-04b32457cdb1\") " pod="openstack/keystone-db-sync-wb5hf" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.580766 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/267d700c-88ab-4264-8ee9-cb3b02d10b23-operator-scripts\") pod \"neutron-0eae-account-create-update-pldpc\" (UID: \"267d700c-88ab-4264-8ee9-cb3b02d10b23\") " pod="openstack/neutron-0eae-account-create-update-pldpc" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.590510 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-mdgvq" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.627832 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4zjn\" (UniqueName: \"kubernetes.io/projected/267d700c-88ab-4264-8ee9-cb3b02d10b23-kube-api-access-n4zjn\") pod \"neutron-0eae-account-create-update-pldpc\" (UID: \"267d700c-88ab-4264-8ee9-cb3b02d10b23\") " pod="openstack/neutron-0eae-account-create-update-pldpc" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.672942 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0eae-account-create-update-pldpc" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.681350 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72da5def-cd27-4431-a7da-04b32457cdb1-combined-ca-bundle\") pod \"keystone-db-sync-wb5hf\" (UID: \"72da5def-cd27-4431-a7da-04b32457cdb1\") " pod="openstack/keystone-db-sync-wb5hf" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.681460 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxnhr\" (UniqueName: \"kubernetes.io/projected/72da5def-cd27-4431-a7da-04b32457cdb1-kube-api-access-cxnhr\") pod \"keystone-db-sync-wb5hf\" (UID: \"72da5def-cd27-4431-a7da-04b32457cdb1\") " pod="openstack/keystone-db-sync-wb5hf" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.681577 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72da5def-cd27-4431-a7da-04b32457cdb1-config-data\") pod \"keystone-db-sync-wb5hf\" (UID: \"72da5def-cd27-4431-a7da-04b32457cdb1\") " pod="openstack/keystone-db-sync-wb5hf" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.694841 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72da5def-cd27-4431-a7da-04b32457cdb1-combined-ca-bundle\") pod \"keystone-db-sync-wb5hf\" (UID: \"72da5def-cd27-4431-a7da-04b32457cdb1\") " pod="openstack/keystone-db-sync-wb5hf" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.699372 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72da5def-cd27-4431-a7da-04b32457cdb1-config-data\") pod \"keystone-db-sync-wb5hf\" (UID: \"72da5def-cd27-4431-a7da-04b32457cdb1\") " pod="openstack/keystone-db-sync-wb5hf" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.742234 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxnhr\" (UniqueName: \"kubernetes.io/projected/72da5def-cd27-4431-a7da-04b32457cdb1-kube-api-access-cxnhr\") pod \"keystone-db-sync-wb5hf\" (UID: \"72da5def-cd27-4431-a7da-04b32457cdb1\") " pod="openstack/keystone-db-sync-wb5hf" Jan 05 22:10:35 crc kubenswrapper[4910]: I0105 22:10:35.772756 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-wb5hf" Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.163714 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1a42-account-create-update-4mgz8"] Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.337161 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-mdgvq"] Jan 05 22:10:36 crc kubenswrapper[4910]: W0105 22:10:36.356659 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf127b53b_cd48_48bd_b890_2dd47e1abd37.slice/crio-afd3dc0723aadaeaae7366e8e6fab825362a5374d4eb2ef7d61c699dd605ad83 WatchSource:0}: Error finding container afd3dc0723aadaeaae7366e8e6fab825362a5374d4eb2ef7d61c699dd605ad83: Status 404 returned error can't find the container with id afd3dc0723aadaeaae7366e8e6fab825362a5374d4eb2ef7d61c699dd605ad83 Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.361963 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-524a-account-create-update-d5t9q"] Jan 05 22:10:36 crc kubenswrapper[4910]: W0105 22:10:36.368239 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7dd7aaf9_4f0f_4c8a_ac0e_99d04d6a12ba.slice/crio-a8e68f6cf8403adc8626dc0fc21f2385866568265ea01ffd1c5c4bb029655086 WatchSource:0}: Error finding container a8e68f6cf8403adc8626dc0fc21f2385866568265ea01ffd1c5c4bb029655086: Status 404 returned error can't find the container with id a8e68f6cf8403adc8626dc0fc21f2385866568265ea01ffd1c5c4bb029655086 Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.370836 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-kjj9c"] Jan 05 22:10:36 crc kubenswrapper[4910]: W0105 22:10:36.370854 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2219fd99_b8c4_4918_8aa2_7f59a307dec5.slice/crio-db901730e4a2791f2326997a755e978d27ed2e0d4c10fbe86aba4c34e094889c WatchSource:0}: Error finding container db901730e4a2791f2326997a755e978d27ed2e0d4c10fbe86aba4c34e094889c: Status 404 returned error can't find the container with id db901730e4a2791f2326997a755e978d27ed2e0d4c10fbe86aba4c34e094889c Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.470866 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-fpldq"] Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.492313 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-wb5hf"] Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.501396 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-0eae-account-create-update-pldpc"] Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.835696 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0eae-account-create-update-pldpc" event={"ID":"267d700c-88ab-4264-8ee9-cb3b02d10b23","Type":"ContainerStarted","Data":"87e9b8376645c7e6c42e7636f5b18b1ac67fcc09f860f024514e6ec1ba4d6736"} Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.836872 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-kjj9c" event={"ID":"2219fd99-b8c4-4918-8aa2-7f59a307dec5","Type":"ContainerStarted","Data":"db901730e4a2791f2326997a755e978d27ed2e0d4c10fbe86aba4c34e094889c"} Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.837865 4910 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-524a-account-create-update-d5t9q" event={"ID":"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba","Type":"ContainerStarted","Data":"a8e68f6cf8403adc8626dc0fc21f2385866568265ea01ffd1c5c4bb029655086"} Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.838667 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-wb5hf" event={"ID":"72da5def-cd27-4431-a7da-04b32457cdb1","Type":"ContainerStarted","Data":"c42c5bddf2d862abbf138dac2eadfbad795f48fb66c12a8fc8f9b9ad30ffc720"} Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.839469 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-fpldq" event={"ID":"a8a77378-db31-4715-b92e-2edc06b352a5","Type":"ContainerStarted","Data":"a4ebe8c547f1d94e561c50897a175595a1b22a823c6a5af01dd44e76a850292b"} Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.840314 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mdgvq" event={"ID":"f127b53b-cd48-48bd-b890-2dd47e1abd37","Type":"ContainerStarted","Data":"afd3dc0723aadaeaae7366e8e6fab825362a5374d4eb2ef7d61c699dd605ad83"} Jan 05 22:10:36 crc kubenswrapper[4910]: I0105 22:10:36.841405 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1a42-account-create-update-4mgz8" event={"ID":"bf38198b-8f7d-4853-ba3d-e1968aa3a284","Type":"ContainerStarted","Data":"455b563a04d1dcc2ce99873d59d38f61e18259d32e89758c18574198867cdd7b"} Jan 05 22:10:37 crc kubenswrapper[4910]: I0105 22:10:37.885039 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1a42-account-create-update-4mgz8" event={"ID":"bf38198b-8f7d-4853-ba3d-e1968aa3a284","Type":"ContainerStarted","Data":"43d64ba74438276a1c444b8b716c3a5435851a1a1454c5658c6993d9ff542c25"} Jan 05 22:10:37 crc kubenswrapper[4910]: I0105 22:10:37.887171 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0eae-account-create-update-pldpc" event={"ID":"267d700c-88ab-4264-8ee9-cb3b02d10b23","Type":"ContainerStarted","Data":"5e1d20fa64749e78dd0848923915f08c98945b04126d6cc3dc10a5ec7b2cb5d2"} Jan 05 22:10:37 crc kubenswrapper[4910]: I0105 22:10:37.892505 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-kjj9c" event={"ID":"2219fd99-b8c4-4918-8aa2-7f59a307dec5","Type":"ContainerStarted","Data":"4cd7dca0dfe15583bcba23955d426ace279334ab34815807ea04f47c94f20ebd"} Jan 05 22:10:37 crc kubenswrapper[4910]: I0105 22:10:37.894316 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-524a-account-create-update-d5t9q" event={"ID":"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba","Type":"ContainerStarted","Data":"f0d59393ce7eea23f9ee13b4d3b5d8217da3802278d0f455c997ce0a2839f1c6"} Jan 05 22:10:37 crc kubenswrapper[4910]: I0105 22:10:37.905528 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-fpldq" event={"ID":"a8a77378-db31-4715-b92e-2edc06b352a5","Type":"ContainerStarted","Data":"93b46b88e064324b14c7d00a03e5638202dad25e6acbc02574f66c4dabeb04cf"} Jan 05 22:10:37 crc kubenswrapper[4910]: I0105 22:10:37.907819 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mdgvq" event={"ID":"f127b53b-cd48-48bd-b890-2dd47e1abd37","Type":"ContainerStarted","Data":"e5c1bbf0050b8e5bf2be2b1af4a1f27408a68c84eca4d7f73bc456d12e9f2191"} Jan 05 22:10:37 crc kubenswrapper[4910]: I0105 22:10:37.910746 4910 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/barbican-1a42-account-create-update-4mgz8" podStartSLOduration=3.910725075 podStartE2EDuration="3.910725075s" podCreationTimestamp="2026-01-05 22:10:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:10:37.902821184 +0000 UTC m=+1169.480318854" watchObservedRunningTime="2026-01-05 22:10:37.910725075 +0000 UTC m=+1169.488222755" Jan 05 22:10:37 crc kubenswrapper[4910]: I0105 22:10:37.918342 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-kjj9c" podStartSLOduration=3.918319478 podStartE2EDuration="3.918319478s" podCreationTimestamp="2026-01-05 22:10:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:10:37.913708027 +0000 UTC m=+1169.491205717" watchObservedRunningTime="2026-01-05 22:10:37.918319478 +0000 UTC m=+1169.495817148" Jan 05 22:10:37 crc kubenswrapper[4910]: I0105 22:10:37.932649 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-524a-account-create-update-d5t9q" podStartSLOduration=2.932628433 podStartE2EDuration="2.932628433s" podCreationTimestamp="2026-01-05 22:10:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:10:37.929472077 +0000 UTC m=+1169.506969747" watchObservedRunningTime="2026-01-05 22:10:37.932628433 +0000 UTC m=+1169.510126103" Jan 05 22:10:37 crc kubenswrapper[4910]: I0105 22:10:37.949536 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-0eae-account-create-update-pldpc" podStartSLOduration=2.949516701 podStartE2EDuration="2.949516701s" podCreationTimestamp="2026-01-05 22:10:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:10:37.939981341 +0000 UTC m=+1169.517479011" watchObservedRunningTime="2026-01-05 22:10:37.949516701 +0000 UTC m=+1169.527014371" Jan 05 22:10:37 crc kubenswrapper[4910]: I0105 22:10:37.957327 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-fpldq" podStartSLOduration=3.9573016389999998 podStartE2EDuration="3.957301639s" podCreationTimestamp="2026-01-05 22:10:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:10:37.954371928 +0000 UTC m=+1169.531869608" watchObservedRunningTime="2026-01-05 22:10:37.957301639 +0000 UTC m=+1169.534799309" Jan 05 22:10:37 crc kubenswrapper[4910]: I0105 22:10:37.975237 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-mdgvq" podStartSLOduration=2.975205381 podStartE2EDuration="2.975205381s" podCreationTimestamp="2026-01-05 22:10:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:10:37.967229019 +0000 UTC m=+1169.544726689" watchObservedRunningTime="2026-01-05 22:10:37.975205381 +0000 UTC m=+1169.552703071" Jan 05 22:10:38 crc kubenswrapper[4910]: I0105 22:10:38.919602 4910 generic.go:334] "Generic (PLEG): container finished" podID="2219fd99-b8c4-4918-8aa2-7f59a307dec5" 
containerID="4cd7dca0dfe15583bcba23955d426ace279334ab34815807ea04f47c94f20ebd" exitCode=0 Jan 05 22:10:38 crc kubenswrapper[4910]: I0105 22:10:38.919710 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-kjj9c" event={"ID":"2219fd99-b8c4-4918-8aa2-7f59a307dec5","Type":"ContainerDied","Data":"4cd7dca0dfe15583bcba23955d426ace279334ab34815807ea04f47c94f20ebd"} Jan 05 22:10:38 crc kubenswrapper[4910]: I0105 22:10:38.924053 4910 generic.go:334] "Generic (PLEG): container finished" podID="a8a77378-db31-4715-b92e-2edc06b352a5" containerID="93b46b88e064324b14c7d00a03e5638202dad25e6acbc02574f66c4dabeb04cf" exitCode=0 Jan 05 22:10:38 crc kubenswrapper[4910]: I0105 22:10:38.924186 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-fpldq" event={"ID":"a8a77378-db31-4715-b92e-2edc06b352a5","Type":"ContainerDied","Data":"93b46b88e064324b14c7d00a03e5638202dad25e6acbc02574f66c4dabeb04cf"} Jan 05 22:10:39 crc kubenswrapper[4910]: I0105 22:10:39.936497 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-524a-account-create-update-d5t9q" event={"ID":"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba","Type":"ContainerDied","Data":"f0d59393ce7eea23f9ee13b4d3b5d8217da3802278d0f455c997ce0a2839f1c6"} Jan 05 22:10:39 crc kubenswrapper[4910]: I0105 22:10:39.936391 4910 generic.go:334] "Generic (PLEG): container finished" podID="7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba" containerID="f0d59393ce7eea23f9ee13b4d3b5d8217da3802278d0f455c997ce0a2839f1c6" exitCode=0 Jan 05 22:10:39 crc kubenswrapper[4910]: I0105 22:10:39.939698 4910 generic.go:334] "Generic (PLEG): container finished" podID="f127b53b-cd48-48bd-b890-2dd47e1abd37" containerID="e5c1bbf0050b8e5bf2be2b1af4a1f27408a68c84eca4d7f73bc456d12e9f2191" exitCode=0 Jan 05 22:10:39 crc kubenswrapper[4910]: I0105 22:10:39.939773 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mdgvq" event={"ID":"f127b53b-cd48-48bd-b890-2dd47e1abd37","Type":"ContainerDied","Data":"e5c1bbf0050b8e5bf2be2b1af4a1f27408a68c84eca4d7f73bc456d12e9f2191"} Jan 05 22:10:39 crc kubenswrapper[4910]: I0105 22:10:39.944450 4910 generic.go:334] "Generic (PLEG): container finished" podID="bf38198b-8f7d-4853-ba3d-e1968aa3a284" containerID="43d64ba74438276a1c444b8b716c3a5435851a1a1454c5658c6993d9ff542c25" exitCode=0 Jan 05 22:10:39 crc kubenswrapper[4910]: I0105 22:10:39.944518 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1a42-account-create-update-4mgz8" event={"ID":"bf38198b-8f7d-4853-ba3d-e1968aa3a284","Type":"ContainerDied","Data":"43d64ba74438276a1c444b8b716c3a5435851a1a1454c5658c6993d9ff542c25"} Jan 05 22:10:39 crc kubenswrapper[4910]: I0105 22:10:39.946331 4910 generic.go:334] "Generic (PLEG): container finished" podID="267d700c-88ab-4264-8ee9-cb3b02d10b23" containerID="5e1d20fa64749e78dd0848923915f08c98945b04126d6cc3dc10a5ec7b2cb5d2" exitCode=0 Jan 05 22:10:39 crc kubenswrapper[4910]: I0105 22:10:39.946575 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0eae-account-create-update-pldpc" event={"ID":"267d700c-88ab-4264-8ee9-cb3b02d10b23","Type":"ContainerDied","Data":"5e1d20fa64749e78dd0848923915f08c98945b04126d6cc3dc10a5ec7b2cb5d2"} Jan 05 22:10:41 crc kubenswrapper[4910]: I0105 22:10:41.391349 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:10:41 crc kubenswrapper[4910]: I0105 22:10:41.472226 4910 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-ghk4m"] Jan 05 22:10:41 crc kubenswrapper[4910]: I0105 22:10:41.472636 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" podUID="37ca269e-6da3-4dba-943f-6f2e957c8036" containerName="dnsmasq-dns" containerID="cri-o://c3a60b5e77b926f34a95e627d81c9ffa761857008a997e6e6062410af816fedf" gracePeriod=10 Jan 05 22:10:42 crc kubenswrapper[4910]: I0105 22:10:42.975451 4910 generic.go:334] "Generic (PLEG): container finished" podID="37ca269e-6da3-4dba-943f-6f2e957c8036" containerID="c3a60b5e77b926f34a95e627d81c9ffa761857008a997e6e6062410af816fedf" exitCode=0 Jan 05 22:10:42 crc kubenswrapper[4910]: I0105 22:10:42.975504 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" event={"ID":"37ca269e-6da3-4dba-943f-6f2e957c8036","Type":"ContainerDied","Data":"c3a60b5e77b926f34a95e627d81c9ffa761857008a997e6e6062410af816fedf"} Jan 05 22:10:44 crc kubenswrapper[4910]: I0105 22:10:44.696306 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" podUID="37ca269e-6da3-4dba-943f-6f2e957c8036" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.113:5353: connect: connection refused" Jan 05 22:10:49 crc kubenswrapper[4910]: E0105 22:10:49.057413 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api@sha256:e4aa4ebbb1e581a12040e9ad2ae2709ac31b5d965bb64fc4252d1028b05c565f" Jan 05 22:10:49 crc kubenswrapper[4910]: E0105 22:10:49.058165 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api@sha256:e4aa4ebbb1e581a12040e9ad2ae2709ac31b5d965bb64fc4252d1028b05c565f,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2kxkn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-75wjt_openstack(569058f0-d9dd-45de-a0ce-dd38bb6ce341): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 05 22:10:49 crc kubenswrapper[4910]: E0105 22:10:49.059402 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-75wjt" podUID="569058f0-d9dd-45de-a0ce-dd38bb6ce341" Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.122824 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0eae-account-create-update-pldpc" Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.130783 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-mdgvq" Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.175422 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4zjn\" (UniqueName: \"kubernetes.io/projected/267d700c-88ab-4264-8ee9-cb3b02d10b23-kube-api-access-n4zjn\") pod \"267d700c-88ab-4264-8ee9-cb3b02d10b23\" (UID: \"267d700c-88ab-4264-8ee9-cb3b02d10b23\") " Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.175539 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f127b53b-cd48-48bd-b890-2dd47e1abd37-operator-scripts\") pod \"f127b53b-cd48-48bd-b890-2dd47e1abd37\" (UID: \"f127b53b-cd48-48bd-b890-2dd47e1abd37\") " Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.175644 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hr8dn\" (UniqueName: \"kubernetes.io/projected/f127b53b-cd48-48bd-b890-2dd47e1abd37-kube-api-access-hr8dn\") pod \"f127b53b-cd48-48bd-b890-2dd47e1abd37\" (UID: \"f127b53b-cd48-48bd-b890-2dd47e1abd37\") " Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.175731 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/267d700c-88ab-4264-8ee9-cb3b02d10b23-operator-scripts\") pod \"267d700c-88ab-4264-8ee9-cb3b02d10b23\" (UID: \"267d700c-88ab-4264-8ee9-cb3b02d10b23\") " Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.178428 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/267d700c-88ab-4264-8ee9-cb3b02d10b23-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "267d700c-88ab-4264-8ee9-cb3b02d10b23" (UID: "267d700c-88ab-4264-8ee9-cb3b02d10b23"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.178496 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f127b53b-cd48-48bd-b890-2dd47e1abd37-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f127b53b-cd48-48bd-b890-2dd47e1abd37" (UID: "f127b53b-cd48-48bd-b890-2dd47e1abd37"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.186190 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f127b53b-cd48-48bd-b890-2dd47e1abd37-kube-api-access-hr8dn" (OuterVolumeSpecName: "kube-api-access-hr8dn") pod "f127b53b-cd48-48bd-b890-2dd47e1abd37" (UID: "f127b53b-cd48-48bd-b890-2dd47e1abd37"). InnerVolumeSpecName "kube-api-access-hr8dn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.191353 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/267d700c-88ab-4264-8ee9-cb3b02d10b23-kube-api-access-n4zjn" (OuterVolumeSpecName: "kube-api-access-n4zjn") pod "267d700c-88ab-4264-8ee9-cb3b02d10b23" (UID: "267d700c-88ab-4264-8ee9-cb3b02d10b23"). InnerVolumeSpecName "kube-api-access-n4zjn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.279461 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hr8dn\" (UniqueName: \"kubernetes.io/projected/f127b53b-cd48-48bd-b890-2dd47e1abd37-kube-api-access-hr8dn\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.279492 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/267d700c-88ab-4264-8ee9-cb3b02d10b23-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.279501 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4zjn\" (UniqueName: \"kubernetes.io/projected/267d700c-88ab-4264-8ee9-cb3b02d10b23-kube-api-access-n4zjn\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.279512 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f127b53b-cd48-48bd-b890-2dd47e1abd37-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:49 crc kubenswrapper[4910]: I0105 22:10:49.695540 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" podUID="37ca269e-6da3-4dba-943f-6f2e957c8036" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.113:5353: connect: connection refused" Jan 05 22:10:50 crc kubenswrapper[4910]: I0105 22:10:50.042171 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mdgvq" event={"ID":"f127b53b-cd48-48bd-b890-2dd47e1abd37","Type":"ContainerDied","Data":"afd3dc0723aadaeaae7366e8e6fab825362a5374d4eb2ef7d61c699dd605ad83"} Jan 05 22:10:50 crc kubenswrapper[4910]: I0105 22:10:50.042218 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afd3dc0723aadaeaae7366e8e6fab825362a5374d4eb2ef7d61c699dd605ad83" Jan 05 22:10:50 crc kubenswrapper[4910]: I0105 22:10:50.042235 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-mdgvq" Jan 05 22:10:50 crc kubenswrapper[4910]: I0105 22:10:50.043812 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-0eae-account-create-update-pldpc" Jan 05 22:10:50 crc kubenswrapper[4910]: I0105 22:10:50.043931 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0eae-account-create-update-pldpc" event={"ID":"267d700c-88ab-4264-8ee9-cb3b02d10b23","Type":"ContainerDied","Data":"87e9b8376645c7e6c42e7636f5b18b1ac67fcc09f860f024514e6ec1ba4d6736"} Jan 05 22:10:50 crc kubenswrapper[4910]: I0105 22:10:50.043982 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="87e9b8376645c7e6c42e7636f5b18b1ac67fcc09f860f024514e6ec1ba4d6736" Jan 05 22:10:50 crc kubenswrapper[4910]: E0105 22:10:50.046394 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api@sha256:e4aa4ebbb1e581a12040e9ad2ae2709ac31b5d965bb64fc4252d1028b05c565f\\\"\"" pod="openstack/glance-db-sync-75wjt" podUID="569058f0-d9dd-45de-a0ce-dd38bb6ce341" Jan 05 22:10:51 crc kubenswrapper[4910]: E0105 22:10:51.596270 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-keystone@sha256:d042d7f91bafb002affff8cf750d694a0da129377255c502028528fe2280e790" Jan 05 22:10:51 crc kubenswrapper[4910]: E0105 22:10:51.596815 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:keystone-db-sync,Image:quay.io/podified-antelope-centos9/openstack-keystone@sha256:d042d7f91bafb002affff8cf750d694a0da129377255c502028528fe2280e790,Command:[/bin/bash],Args:[-c keystone-manage db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/keystone/keystone.conf,SubPath:keystone.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cxnhr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42425,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42425,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-db-sync-wb5hf_openstack(72da5def-cd27-4431-a7da-04b32457cdb1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 05 22:10:51 
crc kubenswrapper[4910]: E0105 22:10:51.598062 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"keystone-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/keystone-db-sync-wb5hf" podUID="72da5def-cd27-4431-a7da-04b32457cdb1" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.699627 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-524a-account-create-update-d5t9q" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.706674 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-fpldq" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.716310 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-kjj9c" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.748009 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1a42-account-create-update-4mgz8" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.829816 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lfghq\" (UniqueName: \"kubernetes.io/projected/a8a77378-db31-4715-b92e-2edc06b352a5-kube-api-access-lfghq\") pod \"a8a77378-db31-4715-b92e-2edc06b352a5\" (UID: \"a8a77378-db31-4715-b92e-2edc06b352a5\") " Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.829880 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vv5lm\" (UniqueName: \"kubernetes.io/projected/7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba-kube-api-access-vv5lm\") pod \"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba\" (UID: \"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba\") " Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.830013 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba-operator-scripts\") pod \"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba\" (UID: \"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba\") " Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.830081 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf38198b-8f7d-4853-ba3d-e1968aa3a284-operator-scripts\") pod \"bf38198b-8f7d-4853-ba3d-e1968aa3a284\" (UID: \"bf38198b-8f7d-4853-ba3d-e1968aa3a284\") " Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.830103 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2219fd99-b8c4-4918-8aa2-7f59a307dec5-operator-scripts\") pod \"2219fd99-b8c4-4918-8aa2-7f59a307dec5\" (UID: \"2219fd99-b8c4-4918-8aa2-7f59a307dec5\") " Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.830193 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gclld\" (UniqueName: \"kubernetes.io/projected/2219fd99-b8c4-4918-8aa2-7f59a307dec5-kube-api-access-gclld\") pod \"2219fd99-b8c4-4918-8aa2-7f59a307dec5\" (UID: \"2219fd99-b8c4-4918-8aa2-7f59a307dec5\") " Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.830217 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8a77378-db31-4715-b92e-2edc06b352a5-operator-scripts\") 
pod \"a8a77378-db31-4715-b92e-2edc06b352a5\" (UID: \"a8a77378-db31-4715-b92e-2edc06b352a5\") " Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.830273 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpqtj\" (UniqueName: \"kubernetes.io/projected/bf38198b-8f7d-4853-ba3d-e1968aa3a284-kube-api-access-kpqtj\") pod \"bf38198b-8f7d-4853-ba3d-e1968aa3a284\" (UID: \"bf38198b-8f7d-4853-ba3d-e1968aa3a284\") " Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.832807 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba" (UID: "7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.833120 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8a77378-db31-4715-b92e-2edc06b352a5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a8a77378-db31-4715-b92e-2edc06b352a5" (UID: "a8a77378-db31-4715-b92e-2edc06b352a5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.833324 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf38198b-8f7d-4853-ba3d-e1968aa3a284-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bf38198b-8f7d-4853-ba3d-e1968aa3a284" (UID: "bf38198b-8f7d-4853-ba3d-e1968aa3a284"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.834708 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2219fd99-b8c4-4918-8aa2-7f59a307dec5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2219fd99-b8c4-4918-8aa2-7f59a307dec5" (UID: "2219fd99-b8c4-4918-8aa2-7f59a307dec5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.836425 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2219fd99-b8c4-4918-8aa2-7f59a307dec5-kube-api-access-gclld" (OuterVolumeSpecName: "kube-api-access-gclld") pod "2219fd99-b8c4-4918-8aa2-7f59a307dec5" (UID: "2219fd99-b8c4-4918-8aa2-7f59a307dec5"). InnerVolumeSpecName "kube-api-access-gclld". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.837088 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8a77378-db31-4715-b92e-2edc06b352a5-kube-api-access-lfghq" (OuterVolumeSpecName: "kube-api-access-lfghq") pod "a8a77378-db31-4715-b92e-2edc06b352a5" (UID: "a8a77378-db31-4715-b92e-2edc06b352a5"). InnerVolumeSpecName "kube-api-access-lfghq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.844019 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf38198b-8f7d-4853-ba3d-e1968aa3a284-kube-api-access-kpqtj" (OuterVolumeSpecName: "kube-api-access-kpqtj") pod "bf38198b-8f7d-4853-ba3d-e1968aa3a284" (UID: "bf38198b-8f7d-4853-ba3d-e1968aa3a284"). 
InnerVolumeSpecName "kube-api-access-kpqtj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.844363 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba-kube-api-access-vv5lm" (OuterVolumeSpecName: "kube-api-access-vv5lm") pod "7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba" (UID: "7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba"). InnerVolumeSpecName "kube-api-access-vv5lm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.926656 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.933334 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpqtj\" (UniqueName: \"kubernetes.io/projected/bf38198b-8f7d-4853-ba3d-e1968aa3a284-kube-api-access-kpqtj\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.933377 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lfghq\" (UniqueName: \"kubernetes.io/projected/a8a77378-db31-4715-b92e-2edc06b352a5-kube-api-access-lfghq\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.933393 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vv5lm\" (UniqueName: \"kubernetes.io/projected/7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba-kube-api-access-vv5lm\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.933406 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.933418 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2219fd99-b8c4-4918-8aa2-7f59a307dec5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.933428 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf38198b-8f7d-4853-ba3d-e1968aa3a284-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.933438 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gclld\" (UniqueName: \"kubernetes.io/projected/2219fd99-b8c4-4918-8aa2-7f59a307dec5-kube-api-access-gclld\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:51 crc kubenswrapper[4910]: I0105 22:10:51.933448 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8a77378-db31-4715-b92e-2edc06b352a5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.034859 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-dns-svc\") pod \"37ca269e-6da3-4dba-943f-6f2e957c8036\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.034979 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-ovsdbserver-nb\") pod \"37ca269e-6da3-4dba-943f-6f2e957c8036\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.035072 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnfcg\" (UniqueName: \"kubernetes.io/projected/37ca269e-6da3-4dba-943f-6f2e957c8036-kube-api-access-hnfcg\") pod \"37ca269e-6da3-4dba-943f-6f2e957c8036\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.035200 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-config\") pod \"37ca269e-6da3-4dba-943f-6f2e957c8036\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.035240 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-ovsdbserver-sb\") pod \"37ca269e-6da3-4dba-943f-6f2e957c8036\" (UID: \"37ca269e-6da3-4dba-943f-6f2e957c8036\") " Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.053768 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37ca269e-6da3-4dba-943f-6f2e957c8036-kube-api-access-hnfcg" (OuterVolumeSpecName: "kube-api-access-hnfcg") pod "37ca269e-6da3-4dba-943f-6f2e957c8036" (UID: "37ca269e-6da3-4dba-943f-6f2e957c8036"). InnerVolumeSpecName "kube-api-access-hnfcg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.104105 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" event={"ID":"37ca269e-6da3-4dba-943f-6f2e957c8036","Type":"ContainerDied","Data":"a901abb7d90cdaf2ea81dc7ddeceb92c320a1fb1dc9fb52b77d7f0aa596a3eab"} Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.104176 4910 scope.go:117] "RemoveContainer" containerID="c3a60b5e77b926f34a95e627d81c9ffa761857008a997e6e6062410af816fedf" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.104358 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-ghk4m" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.110948 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-fpldq" event={"ID":"a8a77378-db31-4715-b92e-2edc06b352a5","Type":"ContainerDied","Data":"a4ebe8c547f1d94e561c50897a175595a1b22a823c6a5af01dd44e76a850292b"} Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.111012 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4ebe8c547f1d94e561c50897a175595a1b22a823c6a5af01dd44e76a850292b" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.111092 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-fpldq" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.120738 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1a42-account-create-update-4mgz8" event={"ID":"bf38198b-8f7d-4853-ba3d-e1968aa3a284","Type":"ContainerDied","Data":"455b563a04d1dcc2ce99873d59d38f61e18259d32e89758c18574198867cdd7b"} Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.120781 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="455b563a04d1dcc2ce99873d59d38f61e18259d32e89758c18574198867cdd7b" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.120847 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1a42-account-create-update-4mgz8" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.132910 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "37ca269e-6da3-4dba-943f-6f2e957c8036" (UID: "37ca269e-6da3-4dba-943f-6f2e957c8036"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.137993 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.138015 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnfcg\" (UniqueName: \"kubernetes.io/projected/37ca269e-6da3-4dba-943f-6f2e957c8036-kube-api-access-hnfcg\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.139067 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-kjj9c" event={"ID":"2219fd99-b8c4-4918-8aa2-7f59a307dec5","Type":"ContainerDied","Data":"db901730e4a2791f2326997a755e978d27ed2e0d4c10fbe86aba4c34e094889c"} Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.139102 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db901730e4a2791f2326997a755e978d27ed2e0d4c10fbe86aba4c34e094889c" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.147781 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-kjj9c" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.154614 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "37ca269e-6da3-4dba-943f-6f2e957c8036" (UID: "37ca269e-6da3-4dba-943f-6f2e957c8036"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.155829 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-524a-account-create-update-d5t9q" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.155996 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-524a-account-create-update-d5t9q" event={"ID":"7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba","Type":"ContainerDied","Data":"a8e68f6cf8403adc8626dc0fc21f2385866568265ea01ffd1c5c4bb029655086"} Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.156093 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a8e68f6cf8403adc8626dc0fc21f2385866568265ea01ffd1c5c4bb029655086" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.156463 4910 scope.go:117] "RemoveContainer" containerID="c1cf57a3c90ac52dd9c7f167357a63e5f638af128a9fc9c6a1d621c7efd91957" Jan 05 22:10:52 crc kubenswrapper[4910]: E0105 22:10:52.157282 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"keystone-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-keystone@sha256:d042d7f91bafb002affff8cf750d694a0da129377255c502028528fe2280e790\\\"\"" pod="openstack/keystone-db-sync-wb5hf" podUID="72da5def-cd27-4431-a7da-04b32457cdb1" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.176965 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-config" (OuterVolumeSpecName: "config") pod "37ca269e-6da3-4dba-943f-6f2e957c8036" (UID: "37ca269e-6da3-4dba-943f-6f2e957c8036"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.177971 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "37ca269e-6da3-4dba-943f-6f2e957c8036" (UID: "37ca269e-6da3-4dba-943f-6f2e957c8036"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.239774 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.239818 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.239835 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/37ca269e-6da3-4dba-943f-6f2e957c8036-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.439061 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-ghk4m"] Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.446253 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-ghk4m"] Jan 05 22:10:52 crc kubenswrapper[4910]: I0105 22:10:52.734796 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37ca269e-6da3-4dba-943f-6f2e957c8036" path="/var/lib/kubelet/pods/37ca269e-6da3-4dba-943f-6f2e957c8036/volumes" Jan 05 22:11:03 crc kubenswrapper[4910]: I0105 22:11:03.256877 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-75wjt" event={"ID":"569058f0-d9dd-45de-a0ce-dd38bb6ce341","Type":"ContainerStarted","Data":"04739ac8541b963a572f27c473632741c9601a8fcf8c88e6bb84e5530fcc2531"} Jan 05 22:11:03 crc kubenswrapper[4910]: I0105 22:11:03.743257 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-75wjt" podStartSLOduration=3.755660074 podStartE2EDuration="36.743230557s" podCreationTimestamp="2026-01-05 22:10:27 +0000 UTC" firstStartedPulling="2026-01-05 22:10:29.305880038 +0000 UTC m=+1160.883377708" lastFinishedPulling="2026-01-05 22:11:02.293450511 +0000 UTC m=+1193.870948191" observedRunningTime="2026-01-05 22:11:03.282381512 +0000 UTC m=+1194.859879252" watchObservedRunningTime="2026-01-05 22:11:03.743230557 +0000 UTC m=+1195.320728247" Jan 05 22:11:05 crc kubenswrapper[4910]: I0105 22:11:05.284332 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-wb5hf" event={"ID":"72da5def-cd27-4431-a7da-04b32457cdb1","Type":"ContainerStarted","Data":"b79059d44414a4899f09fc0f78288c60bf77b33617f459df2cbbbe3c43f950e9"} Jan 05 22:11:08 crc kubenswrapper[4910]: I0105 22:11:08.330382 4910 generic.go:334] "Generic (PLEG): container finished" podID="72da5def-cd27-4431-a7da-04b32457cdb1" containerID="b79059d44414a4899f09fc0f78288c60bf77b33617f459df2cbbbe3c43f950e9" exitCode=0 Jan 05 22:11:08 crc kubenswrapper[4910]: I0105 22:11:08.330501 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-wb5hf" event={"ID":"72da5def-cd27-4431-a7da-04b32457cdb1","Type":"ContainerDied","Data":"b79059d44414a4899f09fc0f78288c60bf77b33617f459df2cbbbe3c43f950e9"} Jan 05 22:11:09 crc kubenswrapper[4910]: I0105 22:11:09.719290 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-wb5hf" Jan 05 22:11:09 crc kubenswrapper[4910]: I0105 22:11:09.766944 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxnhr\" (UniqueName: \"kubernetes.io/projected/72da5def-cd27-4431-a7da-04b32457cdb1-kube-api-access-cxnhr\") pod \"72da5def-cd27-4431-a7da-04b32457cdb1\" (UID: \"72da5def-cd27-4431-a7da-04b32457cdb1\") " Jan 05 22:11:09 crc kubenswrapper[4910]: I0105 22:11:09.767428 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72da5def-cd27-4431-a7da-04b32457cdb1-combined-ca-bundle\") pod \"72da5def-cd27-4431-a7da-04b32457cdb1\" (UID: \"72da5def-cd27-4431-a7da-04b32457cdb1\") " Jan 05 22:11:09 crc kubenswrapper[4910]: I0105 22:11:09.767689 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72da5def-cd27-4431-a7da-04b32457cdb1-config-data\") pod \"72da5def-cd27-4431-a7da-04b32457cdb1\" (UID: \"72da5def-cd27-4431-a7da-04b32457cdb1\") " Jan 05 22:11:09 crc kubenswrapper[4910]: I0105 22:11:09.774858 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72da5def-cd27-4431-a7da-04b32457cdb1-kube-api-access-cxnhr" (OuterVolumeSpecName: "kube-api-access-cxnhr") pod "72da5def-cd27-4431-a7da-04b32457cdb1" (UID: "72da5def-cd27-4431-a7da-04b32457cdb1"). InnerVolumeSpecName "kube-api-access-cxnhr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:11:09 crc kubenswrapper[4910]: I0105 22:11:09.792627 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72da5def-cd27-4431-a7da-04b32457cdb1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "72da5def-cd27-4431-a7da-04b32457cdb1" (UID: "72da5def-cd27-4431-a7da-04b32457cdb1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:09 crc kubenswrapper[4910]: I0105 22:11:09.817293 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72da5def-cd27-4431-a7da-04b32457cdb1-config-data" (OuterVolumeSpecName: "config-data") pod "72da5def-cd27-4431-a7da-04b32457cdb1" (UID: "72da5def-cd27-4431-a7da-04b32457cdb1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:09 crc kubenswrapper[4910]: I0105 22:11:09.871108 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72da5def-cd27-4431-a7da-04b32457cdb1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:09 crc kubenswrapper[4910]: I0105 22:11:09.871174 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72da5def-cd27-4431-a7da-04b32457cdb1-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:09 crc kubenswrapper[4910]: I0105 22:11:09.871187 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxnhr\" (UniqueName: \"kubernetes.io/projected/72da5def-cd27-4431-a7da-04b32457cdb1-kube-api-access-cxnhr\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.352385 4910 generic.go:334] "Generic (PLEG): container finished" podID="569058f0-d9dd-45de-a0ce-dd38bb6ce341" containerID="04739ac8541b963a572f27c473632741c9601a8fcf8c88e6bb84e5530fcc2531" exitCode=0 Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.352481 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-75wjt" event={"ID":"569058f0-d9dd-45de-a0ce-dd38bb6ce341","Type":"ContainerDied","Data":"04739ac8541b963a572f27c473632741c9601a8fcf8c88e6bb84e5530fcc2531"} Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.357974 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-wb5hf" event={"ID":"72da5def-cd27-4431-a7da-04b32457cdb1","Type":"ContainerDied","Data":"c42c5bddf2d862abbf138dac2eadfbad795f48fb66c12a8fc8f9b9ad30ffc720"} Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.358027 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c42c5bddf2d862abbf138dac2eadfbad795f48fb66c12a8fc8f9b9ad30ffc720" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.358375 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-wb5hf" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.663772 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77bbd879b9-grmqc"] Jan 05 22:11:10 crc kubenswrapper[4910]: E0105 22:11:10.664439 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2219fd99-b8c4-4918-8aa2-7f59a307dec5" containerName="mariadb-database-create" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664465 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2219fd99-b8c4-4918-8aa2-7f59a307dec5" containerName="mariadb-database-create" Jan 05 22:11:10 crc kubenswrapper[4910]: E0105 22:11:10.664487 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37ca269e-6da3-4dba-943f-6f2e957c8036" containerName="init" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664495 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="37ca269e-6da3-4dba-943f-6f2e957c8036" containerName="init" Jan 05 22:11:10 crc kubenswrapper[4910]: E0105 22:11:10.664516 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8a77378-db31-4715-b92e-2edc06b352a5" containerName="mariadb-database-create" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664526 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8a77378-db31-4715-b92e-2edc06b352a5" containerName="mariadb-database-create" Jan 05 22:11:10 crc kubenswrapper[4910]: E0105 22:11:10.664539 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37ca269e-6da3-4dba-943f-6f2e957c8036" containerName="dnsmasq-dns" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664547 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="37ca269e-6da3-4dba-943f-6f2e957c8036" containerName="dnsmasq-dns" Jan 05 22:11:10 crc kubenswrapper[4910]: E0105 22:11:10.664554 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72da5def-cd27-4431-a7da-04b32457cdb1" containerName="keystone-db-sync" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664564 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="72da5def-cd27-4431-a7da-04b32457cdb1" containerName="keystone-db-sync" Jan 05 22:11:10 crc kubenswrapper[4910]: E0105 22:11:10.664575 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="267d700c-88ab-4264-8ee9-cb3b02d10b23" containerName="mariadb-account-create-update" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664583 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="267d700c-88ab-4264-8ee9-cb3b02d10b23" containerName="mariadb-account-create-update" Jan 05 22:11:10 crc kubenswrapper[4910]: E0105 22:11:10.664610 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f127b53b-cd48-48bd-b890-2dd47e1abd37" containerName="mariadb-database-create" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664620 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f127b53b-cd48-48bd-b890-2dd47e1abd37" containerName="mariadb-database-create" Jan 05 22:11:10 crc kubenswrapper[4910]: E0105 22:11:10.664631 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba" containerName="mariadb-account-create-update" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664639 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba" containerName="mariadb-account-create-update" Jan 05 22:11:10 crc kubenswrapper[4910]: E0105 22:11:10.664651 4910 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf38198b-8f7d-4853-ba3d-e1968aa3a284" containerName="mariadb-account-create-update" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664659 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf38198b-8f7d-4853-ba3d-e1968aa3a284" containerName="mariadb-account-create-update" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664877 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="37ca269e-6da3-4dba-943f-6f2e957c8036" containerName="dnsmasq-dns" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664894 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8a77378-db31-4715-b92e-2edc06b352a5" containerName="mariadb-database-create" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664909 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2219fd99-b8c4-4918-8aa2-7f59a307dec5" containerName="mariadb-database-create" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664917 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="267d700c-88ab-4264-8ee9-cb3b02d10b23" containerName="mariadb-account-create-update" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664933 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="72da5def-cd27-4431-a7da-04b32457cdb1" containerName="keystone-db-sync" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664946 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf38198b-8f7d-4853-ba3d-e1968aa3a284" containerName="mariadb-account-create-update" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664956 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f127b53b-cd48-48bd-b890-2dd47e1abd37" containerName="mariadb-database-create" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.664967 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba" containerName="mariadb-account-create-update" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.666074 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.679561 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-46j7w"] Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.680989 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.682605 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.682961 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.683179 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.683400 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-t5hmz" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.686153 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.736709 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-46j7w"] Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.785550 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77bbd879b9-grmqc"] Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.788658 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-config\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.788748 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-combined-ca-bundle\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.788823 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-dns-svc\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.788852 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-config-data\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.788873 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-fernet-keys\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.788941 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-scripts\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " 
pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.788970 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-ovsdbserver-sb\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.789000 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5d9j\" (UniqueName: \"kubernetes.io/projected/970366e8-fe49-4945-8223-072bf9227a15-kube-api-access-x5d9j\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.789036 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-dns-swift-storage-0\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.789069 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-credential-keys\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.789165 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-ovsdbserver-nb\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.789200 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2ml5\" (UniqueName: \"kubernetes.io/projected/afd532df-36b6-45cf-94e1-0e7182b938ce-kube-api-access-j2ml5\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.847066 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-4765c"] Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.848682 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4765c" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.854297 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-jn5f9"] Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.855365 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-9hgph" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.855544 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.856278 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.856316 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.861492 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-zqtsn" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.861745 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.862008 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.890379 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-4765c"] Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.890929 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/726618d0-e442-410e-87df-33bca2cf52a4-etc-machine-id\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.890970 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8mlr\" (UniqueName: \"kubernetes.io/projected/b5a1f57c-578a-4396-95b1-e09d6ac92383-kube-api-access-n8mlr\") pod \"neutron-db-sync-4765c\" (UID: \"b5a1f57c-578a-4396-95b1-e09d6ac92383\") " pod="openstack/neutron-db-sync-4765c" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891004 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5a1f57c-578a-4396-95b1-e09d6ac92383-combined-ca-bundle\") pod \"neutron-db-sync-4765c\" (UID: \"b5a1f57c-578a-4396-95b1-e09d6ac92383\") " pod="openstack/neutron-db-sync-4765c" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891032 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-ovsdbserver-nb\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891054 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2ml5\" (UniqueName: \"kubernetes.io/projected/afd532df-36b6-45cf-94e1-0e7182b938ce-kube-api-access-j2ml5\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891074 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-config\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891094 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-combined-ca-bundle\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891134 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-config-data\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891164 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-scripts\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891200 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-dns-svc\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891225 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-config-data\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891242 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-fernet-keys\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891265 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-db-sync-config-data\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891292 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxjfv\" (UniqueName: \"kubernetes.io/projected/726618d0-e442-410e-87df-33bca2cf52a4-kube-api-access-fxjfv\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891321 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-scripts\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891343 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-ovsdbserver-sb\") pod 
\"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891365 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5d9j\" (UniqueName: \"kubernetes.io/projected/970366e8-fe49-4945-8223-072bf9227a15-kube-api-access-x5d9j\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891392 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-dns-swift-storage-0\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891415 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-combined-ca-bundle\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891442 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-credential-keys\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.891475 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5a1f57c-578a-4396-95b1-e09d6ac92383-config\") pod \"neutron-db-sync-4765c\" (UID: \"b5a1f57c-578a-4396-95b1-e09d6ac92383\") " pod="openstack/neutron-db-sync-4765c" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.892701 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-ovsdbserver-nb\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.893825 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-dns-swift-storage-0\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.915590 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-config\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.916047 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-scripts\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " 
pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.917328 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-fernet-keys\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.918431 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-ovsdbserver-sb\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.918970 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-dns-svc\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.954045 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.954110 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.964019 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-credential-keys\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.964283 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-config-data\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.965320 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-combined-ca-bundle\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.969531 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5d9j\" (UniqueName: \"kubernetes.io/projected/970366e8-fe49-4945-8223-072bf9227a15-kube-api-access-x5d9j\") pod \"keystone-bootstrap-46j7w\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.978593 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/cinder-db-sync-jn5f9"] Jan 05 22:11:10 crc kubenswrapper[4910]: I0105 22:11:10.982873 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2ml5\" (UniqueName: \"kubernetes.io/projected/afd532df-36b6-45cf-94e1-0e7182b938ce-kube-api-access-j2ml5\") pod \"dnsmasq-dns-77bbd879b9-grmqc\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") " pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:10.997836 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:10.998538 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-config-data\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.013669 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-scripts\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.013742 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-db-sync-config-data\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.013797 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxjfv\" (UniqueName: \"kubernetes.io/projected/726618d0-e442-410e-87df-33bca2cf52a4-kube-api-access-fxjfv\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.013863 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-combined-ca-bundle\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.013912 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5a1f57c-578a-4396-95b1-e09d6ac92383-config\") pod \"neutron-db-sync-4765c\" (UID: \"b5a1f57c-578a-4396-95b1-e09d6ac92383\") " pod="openstack/neutron-db-sync-4765c" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.013979 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/726618d0-e442-410e-87df-33bca2cf52a4-etc-machine-id\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.014003 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8mlr\" (UniqueName: \"kubernetes.io/projected/b5a1f57c-578a-4396-95b1-e09d6ac92383-kube-api-access-n8mlr\") pod \"neutron-db-sync-4765c\" (UID: 
\"b5a1f57c-578a-4396-95b1-e09d6ac92383\") " pod="openstack/neutron-db-sync-4765c" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.014036 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5a1f57c-578a-4396-95b1-e09d6ac92383-combined-ca-bundle\") pod \"neutron-db-sync-4765c\" (UID: \"b5a1f57c-578a-4396-95b1-e09d6ac92383\") " pod="openstack/neutron-db-sync-4765c" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.009042 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-config-data\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.016320 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/726618d0-e442-410e-87df-33bca2cf52a4-etc-machine-id\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.020506 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.027480 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.029956 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5a1f57c-578a-4396-95b1-e09d6ac92383-config\") pod \"neutron-db-sync-4765c\" (UID: \"b5a1f57c-578a-4396-95b1-e09d6ac92383\") " pod="openstack/neutron-db-sync-4765c" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.034472 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-scripts\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.039544 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.053974 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-db-sync-config-data\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.067398 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.067713 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.099819 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxjfv\" (UniqueName: \"kubernetes.io/projected/726618d0-e442-410e-87df-33bca2cf52a4-kube-api-access-fxjfv\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.101539 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8mlr\" (UniqueName: \"kubernetes.io/projected/b5a1f57c-578a-4396-95b1-e09d6ac92383-kube-api-access-n8mlr\") pod \"neutron-db-sync-4765c\" (UID: \"b5a1f57c-578a-4396-95b1-e09d6ac92383\") " pod="openstack/neutron-db-sync-4765c" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.103368 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-combined-ca-bundle\") pod \"cinder-db-sync-jn5f9\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.103888 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5a1f57c-578a-4396-95b1-e09d6ac92383-combined-ca-bundle\") pod \"neutron-db-sync-4765c\" (UID: \"b5a1f57c-578a-4396-95b1-e09d6ac92383\") " pod="openstack/neutron-db-sync-4765c" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.121472 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.169796 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-ktvmp"] Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.171065 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-ktvmp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.173790 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4765c" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.174029 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-rsg6c" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.182832 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-ktvmp"] Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.184520 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.195325 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.207880 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-dgttl"] Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.209183 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.214964 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-7vmps" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.216211 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.216311 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.217267 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.217317 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9wxw\" (UniqueName: \"kubernetes.io/projected/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-kube-api-access-z9wxw\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.217342 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-run-httpd\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.217360 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-config-data\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.217398 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-scripts\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.217420 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-log-httpd\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.217446 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: 
I0105 22:11:11.228909 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77bbd879b9-grmqc"] Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.247238 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-dgttl"] Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.272203 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8495b76777-brtjp"] Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.277140 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8495b76777-brtjp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.292761 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8495b76777-brtjp"] Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320226 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-log-httpd\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320281 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b94e459d-172c-41ca-a38c-384a5f3e323e-db-sync-config-data\") pod \"barbican-db-sync-ktvmp\" (UID: \"b94e459d-172c-41ca-a38c-384a5f3e323e\") " pod="openstack/barbican-db-sync-ktvmp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320315 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dn5qm\" (UniqueName: \"kubernetes.io/projected/4432a67a-7276-4f55-838d-b685529581d5-kube-api-access-dn5qm\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320335 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320364 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzjv2\" (UniqueName: \"kubernetes.io/projected/b94e459d-172c-41ca-a38c-384a5f3e323e-kube-api-access-mzjv2\") pod \"barbican-db-sync-ktvmp\" (UID: \"b94e459d-172c-41ca-a38c-384a5f3e323e\") " pod="openstack/barbican-db-sync-ktvmp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320394 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b94e459d-172c-41ca-a38c-384a5f3e323e-combined-ca-bundle\") pod \"barbican-db-sync-ktvmp\" (UID: \"b94e459d-172c-41ca-a38c-384a5f3e323e\") " pod="openstack/barbican-db-sync-ktvmp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320413 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-combined-ca-bundle\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc 
kubenswrapper[4910]: I0105 22:11:11.320458 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-config-data\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320476 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320492 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4432a67a-7276-4f55-838d-b685529581d5-logs\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320511 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-scripts\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320537 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9wxw\" (UniqueName: \"kubernetes.io/projected/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-kube-api-access-z9wxw\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320561 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-run-httpd\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320582 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-config-data\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.320621 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-scripts\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.327169 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-scripts\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.327693 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-log-httpd\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " 
pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.327870 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-run-httpd\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.330876 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.331294 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-config-data\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.333597 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.345801 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9wxw\" (UniqueName: \"kubernetes.io/projected/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-kube-api-access-z9wxw\") pod \"ceilometer-0\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422200 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-config-data\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422618 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-ovsdbserver-nb\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422640 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4432a67a-7276-4f55-838d-b685529581d5-logs\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422658 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-scripts\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422687 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-config\") pod 
\"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422733 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5jtq\" (UniqueName: \"kubernetes.io/projected/747be938-8771-4bb6-8279-2a1e5607507d-kube-api-access-k5jtq\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422758 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-ovsdbserver-sb\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422789 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b94e459d-172c-41ca-a38c-384a5f3e323e-db-sync-config-data\") pod \"barbican-db-sync-ktvmp\" (UID: \"b94e459d-172c-41ca-a38c-384a5f3e323e\") " pod="openstack/barbican-db-sync-ktvmp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422814 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dn5qm\" (UniqueName: \"kubernetes.io/projected/4432a67a-7276-4f55-838d-b685529581d5-kube-api-access-dn5qm\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422836 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzjv2\" (UniqueName: \"kubernetes.io/projected/b94e459d-172c-41ca-a38c-384a5f3e323e-kube-api-access-mzjv2\") pod \"barbican-db-sync-ktvmp\" (UID: \"b94e459d-172c-41ca-a38c-384a5f3e323e\") " pod="openstack/barbican-db-sync-ktvmp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422859 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-dns-swift-storage-0\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422878 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b94e459d-172c-41ca-a38c-384a5f3e323e-combined-ca-bundle\") pod \"barbican-db-sync-ktvmp\" (UID: \"b94e459d-172c-41ca-a38c-384a5f3e323e\") " pod="openstack/barbican-db-sync-ktvmp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422896 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-combined-ca-bundle\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.422913 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-dns-svc\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.425532 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4432a67a-7276-4f55-838d-b685529581d5-logs\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.430042 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-config-data\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.432618 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-scripts\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.434507 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.434815 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b94e459d-172c-41ca-a38c-384a5f3e323e-combined-ca-bundle\") pod \"barbican-db-sync-ktvmp\" (UID: \"b94e459d-172c-41ca-a38c-384a5f3e323e\") " pod="openstack/barbican-db-sync-ktvmp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.442583 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-combined-ca-bundle\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.445830 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b94e459d-172c-41ca-a38c-384a5f3e323e-db-sync-config-data\") pod \"barbican-db-sync-ktvmp\" (UID: \"b94e459d-172c-41ca-a38c-384a5f3e323e\") " pod="openstack/barbican-db-sync-ktvmp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.451477 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dn5qm\" (UniqueName: \"kubernetes.io/projected/4432a67a-7276-4f55-838d-b685529581d5-kube-api-access-dn5qm\") pod \"placement-db-sync-dgttl\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.453438 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzjv2\" (UniqueName: \"kubernetes.io/projected/b94e459d-172c-41ca-a38c-384a5f3e323e-kube-api-access-mzjv2\") pod \"barbican-db-sync-ktvmp\" (UID: \"b94e459d-172c-41ca-a38c-384a5f3e323e\") " pod="openstack/barbican-db-sync-ktvmp" Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.502444 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-ktvmp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.526342 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5jtq\" (UniqueName: \"kubernetes.io/projected/747be938-8771-4bb6-8279-2a1e5607507d-kube-api-access-k5jtq\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.526400 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-ovsdbserver-sb\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.526465 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-dns-swift-storage-0\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.526491 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-dns-svc\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.526530 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-ovsdbserver-nb\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.526566 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-config\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.527465 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-config\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.527631 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-dns-swift-storage-0\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.528042 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-dns-svc\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.528350 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-ovsdbserver-sb\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.528621 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-ovsdbserver-nb\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.536778 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-dgttl"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.546875 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5jtq\" (UniqueName: \"kubernetes.io/projected/747be938-8771-4bb6-8279-2a1e5607507d-kube-api-access-k5jtq\") pod \"dnsmasq-dns-8495b76777-brtjp\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") " pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.603452 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.623219 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-46j7w"]
Jan 05 22:11:11 crc kubenswrapper[4910]: W0105 22:11:11.646003 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod970366e8_fe49_4945_8223_072bf9227a15.slice/crio-e55891592eecad8d1174d74b0f0fd65bfeb9973b8bf9865f74d40c16d77d9f9d WatchSource:0}: Error finding container e55891592eecad8d1174d74b0f0fd65bfeb9973b8bf9865f74d40c16d77d9f9d: Status 404 returned error can't find the container with id e55891592eecad8d1174d74b0f0fd65bfeb9973b8bf9865f74d40c16d77d9f9d
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.700020 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77bbd879b9-grmqc"]
Jan 05 22:11:11 crc kubenswrapper[4910]: I0105 22:11:11.805032 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-4765c"]
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:11.957016 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-jn5f9"]
Jan 05 22:11:12 crc kubenswrapper[4910]: W0105 22:11:11.964346 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod726618d0_e442_410e_87df_33bca2cf52a4.slice/crio-ec5bba4875b3542eec5ac741aa889c5e5c96d1f306caf6e06c379c15ba2e7a5c WatchSource:0}: Error finding container ec5bba4875b3542eec5ac741aa889c5e5c96d1f306caf6e06c379c15ba2e7a5c: Status 404 returned error can't find the container with id ec5bba4875b3542eec5ac741aa889c5e5c96d1f306caf6e06c379c15ba2e7a5c
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.070157 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-75wjt"
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.240815 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-combined-ca-bundle\") pod \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") "
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.240876 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kxkn\" (UniqueName: \"kubernetes.io/projected/569058f0-d9dd-45de-a0ce-dd38bb6ce341-kube-api-access-2kxkn\") pod \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") "
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.240914 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-db-sync-config-data\") pod \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") "
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.240968 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-config-data\") pod \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\" (UID: \"569058f0-d9dd-45de-a0ce-dd38bb6ce341\") "
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.248356 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "569058f0-d9dd-45de-a0ce-dd38bb6ce341" (UID: "569058f0-d9dd-45de-a0ce-dd38bb6ce341"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.249268 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/569058f0-d9dd-45de-a0ce-dd38bb6ce341-kube-api-access-2kxkn" (OuterVolumeSpecName: "kube-api-access-2kxkn") pod "569058f0-d9dd-45de-a0ce-dd38bb6ce341" (UID: "569058f0-d9dd-45de-a0ce-dd38bb6ce341"). InnerVolumeSpecName "kube-api-access-2kxkn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.276536 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "569058f0-d9dd-45de-a0ce-dd38bb6ce341" (UID: "569058f0-d9dd-45de-a0ce-dd38bb6ce341"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.304277 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-config-data" (OuterVolumeSpecName: "config-data") pod "569058f0-d9dd-45de-a0ce-dd38bb6ce341" (UID: "569058f0-d9dd-45de-a0ce-dd38bb6ce341"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.342577 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.342608 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kxkn\" (UniqueName: \"kubernetes.io/projected/569058f0-d9dd-45de-a0ce-dd38bb6ce341-kube-api-access-2kxkn\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.342619 4910 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.342628 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/569058f0-d9dd-45de-a0ce-dd38bb6ce341-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.380910 4910 generic.go:334] "Generic (PLEG): container finished" podID="afd532df-36b6-45cf-94e1-0e7182b938ce" containerID="4f574442341e268414fe2cd5561245804e6af6ccc1e963a2268387892e7fe7fd" exitCode=0
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.380991 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" event={"ID":"afd532df-36b6-45cf-94e1-0e7182b938ce","Type":"ContainerDied","Data":"4f574442341e268414fe2cd5561245804e6af6ccc1e963a2268387892e7fe7fd"}
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.381023 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" event={"ID":"afd532df-36b6-45cf-94e1-0e7182b938ce","Type":"ContainerStarted","Data":"4335a1c9d0753ab65631cf672365f5378ffb1ea65eba3dacd5e6526c594ab4ab"}
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.389372 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-46j7w" event={"ID":"970366e8-fe49-4945-8223-072bf9227a15","Type":"ContainerStarted","Data":"ca4ab0b3e97c3a0c6d935237ab326c6797dbb8d63f989b52bb60164c52dd940e"}
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.389418 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-46j7w" event={"ID":"970366e8-fe49-4945-8223-072bf9227a15","Type":"ContainerStarted","Data":"e55891592eecad8d1174d74b0f0fd65bfeb9973b8bf9865f74d40c16d77d9f9d"}
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.393862 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-75wjt" event={"ID":"569058f0-d9dd-45de-a0ce-dd38bb6ce341","Type":"ContainerDied","Data":"e1a5d79bebceff73833872a9b2bf9fbbf2fd0129b593572bba9ee308f6fc3b5b"}
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.393882 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1a5d79bebceff73833872a9b2bf9fbbf2fd0129b593572bba9ee308f6fc3b5b"
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.393924 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-75wjt"
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.428793 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4765c" event={"ID":"b5a1f57c-578a-4396-95b1-e09d6ac92383","Type":"ContainerStarted","Data":"f0973ba8f153769879aba399ae8990d5d2d00746e48c5db9253cff5df721d6f9"}
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.428853 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4765c" event={"ID":"b5a1f57c-578a-4396-95b1-e09d6ac92383","Type":"ContainerStarted","Data":"69254fbc641112c4b31c47925e9625849e1913994d0de8336371e41b5b2e7077"}
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.434033 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jn5f9" event={"ID":"726618d0-e442-410e-87df-33bca2cf52a4","Type":"ContainerStarted","Data":"ec5bba4875b3542eec5ac741aa889c5e5c96d1f306caf6e06c379c15ba2e7a5c"}
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.466075 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-46j7w" podStartSLOduration=2.465096777 podStartE2EDuration="2.465096777s" podCreationTimestamp="2026-01-05 22:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:11:12.445483704 +0000 UTC m=+1204.022981374" watchObservedRunningTime="2026-01-05 22:11:12.465096777 +0000 UTC m=+1204.042594447"
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.483551 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-4765c" podStartSLOduration=2.483525122 podStartE2EDuration="2.483525122s" podCreationTimestamp="2026-01-05 22:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:11:12.472403834 +0000 UTC m=+1204.049901504" watchObservedRunningTime="2026-01-05 22:11:12.483525122 +0000 UTC m=+1204.061022792"
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.885948 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8495b76777-brtjp"]
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.928439 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-66567888d7-qkxjs"]
Jan 05 22:11:12 crc kubenswrapper[4910]: E0105 22:11:12.928818 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="569058f0-d9dd-45de-a0ce-dd38bb6ce341" containerName="glance-db-sync"
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.928831 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="569058f0-d9dd-45de-a0ce-dd38bb6ce341" containerName="glance-db-sync"
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.928992 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="569058f0-d9dd-45de-a0ce-dd38bb6ce341" containerName="glance-db-sync"
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.929898 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:12 crc kubenswrapper[4910]: I0105 22:11:12.942407 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-qkxjs"]
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.077948 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-dgttl"]
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.097326 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-ovsdbserver-sb\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.097394 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-ovsdbserver-nb\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.097430 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-config\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.097469 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k9lx\" (UniqueName: \"kubernetes.io/projected/be9110f6-9a1a-4e05-8e41-0eb8630686cc-kube-api-access-6k9lx\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.097571 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-dns-svc\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.097624 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-dns-swift-storage-0\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.201170 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-dns-svc\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.201244 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-dns-swift-storage-0\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.201288 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-ovsdbserver-sb\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.201313 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-ovsdbserver-nb\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.201338 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-config\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.201362 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k9lx\" (UniqueName: \"kubernetes.io/projected/be9110f6-9a1a-4e05-8e41-0eb8630686cc-kube-api-access-6k9lx\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.202967 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-dns-svc\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.203516 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-dns-swift-storage-0\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.204087 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-ovsdbserver-sb\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.204616 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-ovsdbserver-nb\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.205163 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-config\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.252399 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6k9lx\" (UniqueName: \"kubernetes.io/projected/be9110f6-9a1a-4e05-8e41-0eb8630686cc-kube-api-access-6k9lx\") pod \"dnsmasq-dns-66567888d7-qkxjs\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.281562 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66567888d7-qkxjs"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.314084 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8495b76777-brtjp"]
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.336202 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.444653 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-ktvmp"]
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.508701 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77bbd879b9-grmqc" event={"ID":"afd532df-36b6-45cf-94e1-0e7182b938ce","Type":"ContainerDied","Data":"4335a1c9d0753ab65631cf672365f5378ffb1ea65eba3dacd5e6526c594ab4ab"}
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.508751 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4335a1c9d0753ab65631cf672365f5378ffb1ea65eba3dacd5e6526c594ab4ab"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.510579 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77bbd879b9-grmqc"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.512496 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8495b76777-brtjp" event={"ID":"747be938-8771-4bb6-8279-2a1e5607507d","Type":"ContainerStarted","Data":"c69d8fc2791741816801a4b7d7818a0e2c58c52fc504c796764783965e82d0d3"}
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.518758 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-dgttl" event={"ID":"4432a67a-7276-4f55-838d-b685529581d5","Type":"ContainerStarted","Data":"79f2d9e26f3b3806a7a81cd7b8365ff74ac06a75fd3803b4d82a48e4240dbf32"}
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.529519 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac","Type":"ContainerStarted","Data":"c16d0255b97b18005fd07da2c3219c413590ef57cd5dda64717961109d853194"}
Jan 05 22:11:13 crc kubenswrapper[4910]: W0105 22:11:13.579603 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb94e459d_172c_41ca_a38c_384a5f3e323e.slice/crio-81fbf4382632658ac6489c62b331ba64f07d423a45af65c462acb27a2b0bf043 WatchSource:0}: Error finding container 81fbf4382632658ac6489c62b331ba64f07d423a45af65c462acb27a2b0bf043: Status 404 returned error can't find the container with id 81fbf4382632658ac6489c62b331ba64f07d423a45af65c462acb27a2b0bf043
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.613697 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-ovsdbserver-nb\") pod \"afd532df-36b6-45cf-94e1-0e7182b938ce\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") "
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.613783 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2ml5\" (UniqueName: \"kubernetes.io/projected/afd532df-36b6-45cf-94e1-0e7182b938ce-kube-api-access-j2ml5\") pod \"afd532df-36b6-45cf-94e1-0e7182b938ce\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") "
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.613807 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-dns-svc\") pod \"afd532df-36b6-45cf-94e1-0e7182b938ce\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") "
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.613918 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-dns-swift-storage-0\") pod \"afd532df-36b6-45cf-94e1-0e7182b938ce\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") "
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.613991 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-config\") pod \"afd532df-36b6-45cf-94e1-0e7182b938ce\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") "
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.614054 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-ovsdbserver-sb\") pod \"afd532df-36b6-45cf-94e1-0e7182b938ce\" (UID: \"afd532df-36b6-45cf-94e1-0e7182b938ce\") "
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.623275 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afd532df-36b6-45cf-94e1-0e7182b938ce-kube-api-access-j2ml5" (OuterVolumeSpecName: "kube-api-access-j2ml5") pod "afd532df-36b6-45cf-94e1-0e7182b938ce" (UID: "afd532df-36b6-45cf-94e1-0e7182b938ce"). InnerVolumeSpecName "kube-api-access-j2ml5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.644807 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "afd532df-36b6-45cf-94e1-0e7182b938ce" (UID: "afd532df-36b6-45cf-94e1-0e7182b938ce"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.663376 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "afd532df-36b6-45cf-94e1-0e7182b938ce" (UID: "afd532df-36b6-45cf-94e1-0e7182b938ce"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.664862 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "afd532df-36b6-45cf-94e1-0e7182b938ce" (UID: "afd532df-36b6-45cf-94e1-0e7182b938ce"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.665842 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-config" (OuterVolumeSpecName: "config") pod "afd532df-36b6-45cf-94e1-0e7182b938ce" (UID: "afd532df-36b6-45cf-94e1-0e7182b938ce"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.680979 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "afd532df-36b6-45cf-94e1-0e7182b938ce" (UID: "afd532df-36b6-45cf-94e1-0e7182b938ce"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.716904 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.717403 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-config\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.717417 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.717427 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.717438 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2ml5\" (UniqueName: \"kubernetes.io/projected/afd532df-36b6-45cf-94e1-0e7182b938ce-kube-api-access-j2ml5\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.717450 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/afd532df-36b6-45cf-94e1-0e7182b938ce-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.741921 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 05 22:11:13 crc kubenswrapper[4910]: E0105 22:11:13.742311 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afd532df-36b6-45cf-94e1-0e7182b938ce" containerName="init"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.742329 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="afd532df-36b6-45cf-94e1-0e7182b938ce" containerName="init"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.742519 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="afd532df-36b6-45cf-94e1-0e7182b938ce" containerName="init"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.743369 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.750134 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.750529 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.750731 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-fks6q"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.762275 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.921555 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.921614 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.921711 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl54k\" (UniqueName: \"kubernetes.io/projected/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-kube-api-access-gl54k\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.921890 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.921955 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-logs\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.921989 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-scripts\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.922032 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-config-data\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:13 crc kubenswrapper[4910]: I0105 22:11:13.934672 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-qkxjs"]
Jan 05 22:11:13 crc kubenswrapper[4910]: W0105 22:11:13.952800 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe9110f6_9a1a_4e05_8e41_0eb8630686cc.slice/crio-a68c70ce4003a93a43b379ebfb09a5850d12eef770fbe9e304a207f2ac9308a2 WatchSource:0}: Error finding container a68c70ce4003a93a43b379ebfb09a5850d12eef770fbe9e304a207f2ac9308a2: Status 404 returned error can't find the container with id a68c70ce4003a93a43b379ebfb09a5850d12eef770fbe9e304a207f2ac9308a2
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.023901 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl54k\" (UniqueName: \"kubernetes.io/projected/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-kube-api-access-gl54k\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.024372 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.024401 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-logs\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.024425 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-scripts\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.024471 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-config-data\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.024518 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.024540 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.024913 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.024976 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-logs\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.025428 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.030027 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-config-data\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.039973 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.041060 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-scripts\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.046693 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl54k\" (UniqueName: \"kubernetes.io/projected/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-kube-api-access-gl54k\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.063217 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.135062 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.173848 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.174236 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.179535 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.295081 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.295789 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.369513 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.369652 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.369702 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrwq6\" (UniqueName: \"kubernetes.io/projected/14ac9c02-b3f0-41e5-81da-a3326d5ed403-kube-api-access-lrwq6\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.369741 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/14ac9c02-b3f0-41e5-81da-a3326d5ed403-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.369826 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-config-data\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.369851 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-scripts\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.370034 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14ac9c02-b3f0-41e5-81da-a3326d5ed403-logs\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.394407 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 05 22:11:14 crc kubenswrapper[4910]: E0105 22:11:14.395325 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data glance httpd-run kube-api-access-lrwq6 logs scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/glance-default-internal-api-0" podUID="14ac9c02-b3f0-41e5-81da-a3326d5ed403"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.410789 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.474317 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-config-data\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.474358 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-scripts\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.474390 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14ac9c02-b3f0-41e5-81da-a3326d5ed403-logs\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.474434 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.474515 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.474576 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrwq6\" (UniqueName: \"kubernetes.io/projected/14ac9c02-b3f0-41e5-81da-a3326d5ed403-kube-api-access-lrwq6\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.474602 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/14ac9c02-b3f0-41e5-81da-a3326d5ed403-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.475196 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/14ac9c02-b3f0-41e5-81da-a3326d5ed403-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.475433 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14ac9c02-b3f0-41e5-81da-a3326d5ed403-logs\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.476338 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.479274 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-scripts\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.493881 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-config-data\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.499193 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.507174 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrwq6\" (UniqueName: \"kubernetes.io/projected/14ac9c02-b3f0-41e5-81da-a3326d5ed403-kube-api-access-lrwq6\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.551595 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-ktvmp" event={"ID":"b94e459d-172c-41ca-a38c-384a5f3e323e","Type":"ContainerStarted","Data":"81fbf4382632658ac6489c62b331ba64f07d423a45af65c462acb27a2b0bf043"}
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.554480 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") " pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.566643 4910 generic.go:334] "Generic (PLEG): container finished" podID="747be938-8771-4bb6-8279-2a1e5607507d" containerID="8fa704cf48fef15649c22f1fd9f5cfaa6b68013741eb7cdddbda31b989ed68b8" exitCode=0
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.566766 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8495b76777-brtjp" event={"ID":"747be938-8771-4bb6-8279-2a1e5607507d","Type":"ContainerDied","Data":"8fa704cf48fef15649c22f1fd9f5cfaa6b68013741eb7cdddbda31b989ed68b8"}
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.609942 4910 generic.go:334] "Generic (PLEG): container finished" podID="be9110f6-9a1a-4e05-8e41-0eb8630686cc" containerID="5c0ede15b426840cab327d372b81755dfcc50901e0f9e3eeed4e5f6148d82e00" exitCode=0
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.610072 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77bbd879b9-grmqc"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.612514 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-qkxjs" event={"ID":"be9110f6-9a1a-4e05-8e41-0eb8630686cc","Type":"ContainerDied","Data":"5c0ede15b426840cab327d372b81755dfcc50901e0f9e3eeed4e5f6148d82e00"}
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.612575 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.612605 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-qkxjs" event={"ID":"be9110f6-9a1a-4e05-8e41-0eb8630686cc","Type":"ContainerStarted","Data":"a68c70ce4003a93a43b379ebfb09a5850d12eef770fbe9e304a207f2ac9308a2"}
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.655356 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.751721 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77bbd879b9-grmqc"]
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.760028 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77bbd879b9-grmqc"]
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.778857 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") "
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.778961 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-config-data\") pod \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") "
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.779090 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-combined-ca-bundle\") pod \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") "
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.779165 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14ac9c02-b3f0-41e5-81da-a3326d5ed403-logs\") pod \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") "
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.779297 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrwq6\" (UniqueName: \"kubernetes.io/projected/14ac9c02-b3f0-41e5-81da-a3326d5ed403-kube-api-access-lrwq6\") pod \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") "
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.779326 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/14ac9c02-b3f0-41e5-81da-a3326d5ed403-httpd-run\") pod \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") "
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.779393 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-scripts\") pod \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\" (UID: \"14ac9c02-b3f0-41e5-81da-a3326d5ed403\") "
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.780490 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14ac9c02-b3f0-41e5-81da-a3326d5ed403-logs" (OuterVolumeSpecName: "logs") pod "14ac9c02-b3f0-41e5-81da-a3326d5ed403" (UID: "14ac9c02-b3f0-41e5-81da-a3326d5ed403"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.782467 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "14ac9c02-b3f0-41e5-81da-a3326d5ed403" (UID: "14ac9c02-b3f0-41e5-81da-a3326d5ed403"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.783593 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14ac9c02-b3f0-41e5-81da-a3326d5ed403-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "14ac9c02-b3f0-41e5-81da-a3326d5ed403" (UID: "14ac9c02-b3f0-41e5-81da-a3326d5ed403"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.787298 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14ac9c02-b3f0-41e5-81da-a3326d5ed403-kube-api-access-lrwq6" (OuterVolumeSpecName: "kube-api-access-lrwq6") pod "14ac9c02-b3f0-41e5-81da-a3326d5ed403" (UID: "14ac9c02-b3f0-41e5-81da-a3326d5ed403"). InnerVolumeSpecName "kube-api-access-lrwq6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.789370 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-config-data" (OuterVolumeSpecName: "config-data") pod "14ac9c02-b3f0-41e5-81da-a3326d5ed403" (UID: "14ac9c02-b3f0-41e5-81da-a3326d5ed403"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.796060 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-scripts" (OuterVolumeSpecName: "scripts") pod "14ac9c02-b3f0-41e5-81da-a3326d5ed403" (UID: "14ac9c02-b3f0-41e5-81da-a3326d5ed403"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.837953 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "14ac9c02-b3f0-41e5-81da-a3326d5ed403" (UID: "14ac9c02-b3f0-41e5-81da-a3326d5ed403"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.884054 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.884105 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14ac9c02-b3f0-41e5-81da-a3326d5ed403-logs\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.884151 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrwq6\" (UniqueName: \"kubernetes.io/projected/14ac9c02-b3f0-41e5-81da-a3326d5ed403-kube-api-access-lrwq6\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.884165 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/14ac9c02-b3f0-41e5-81da-a3326d5ed403-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.884176 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-scripts\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.884217 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" "
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.884229 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14ac9c02-b3f0-41e5-81da-a3326d5ed403-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.908699 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc"
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.972693 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 05 22:11:14 crc kubenswrapper[4910]: I0105 22:11:14.986563 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.248896 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8495b76777-brtjp"
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.394822 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-dns-swift-storage-0\") pod \"747be938-8771-4bb6-8279-2a1e5607507d\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") "
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.394993 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-ovsdbserver-sb\") pod \"747be938-8771-4bb6-8279-2a1e5607507d\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") "
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.395094 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5jtq\" (UniqueName: \"kubernetes.io/projected/747be938-8771-4bb6-8279-2a1e5607507d-kube-api-access-k5jtq\") pod \"747be938-8771-4bb6-8279-2a1e5607507d\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") "
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.395145 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-config\") pod \"747be938-8771-4bb6-8279-2a1e5607507d\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") "
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.395179 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-dns-svc\") pod \"747be938-8771-4bb6-8279-2a1e5607507d\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") "
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.395350 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-ovsdbserver-nb\") pod \"747be938-8771-4bb6-8279-2a1e5607507d\" (UID: \"747be938-8771-4bb6-8279-2a1e5607507d\") "
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.401297 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/747be938-8771-4bb6-8279-2a1e5607507d-kube-api-access-k5jtq" (OuterVolumeSpecName: "kube-api-access-k5jtq") pod "747be938-8771-4bb6-8279-2a1e5607507d" (UID: "747be938-8771-4bb6-8279-2a1e5607507d"). InnerVolumeSpecName "kube-api-access-k5jtq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.425055 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-config" (OuterVolumeSpecName: "config") pod "747be938-8771-4bb6-8279-2a1e5607507d" (UID: "747be938-8771-4bb6-8279-2a1e5607507d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.435184 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "747be938-8771-4bb6-8279-2a1e5607507d" (UID: "747be938-8771-4bb6-8279-2a1e5607507d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.455837 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "747be938-8771-4bb6-8279-2a1e5607507d" (UID: "747be938-8771-4bb6-8279-2a1e5607507d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.458267 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "747be938-8771-4bb6-8279-2a1e5607507d" (UID: "747be938-8771-4bb6-8279-2a1e5607507d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.497645 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.497672 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.497681 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.497694 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5jtq\" (UniqueName: \"kubernetes.io/projected/747be938-8771-4bb6-8279-2a1e5607507d-kube-api-access-k5jtq\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.497703 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-config\") on node \"crc\" DevicePath \"\""
Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.507564 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "747be938-8771-4bb6-8279-2a1e5607507d" (UID: "747be938-8771-4bb6-8279-2a1e5607507d"). InnerVolumeSpecName "dns-svc".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.600172 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/747be938-8771-4bb6-8279-2a1e5607507d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.630907 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-qkxjs" event={"ID":"be9110f6-9a1a-4e05-8e41-0eb8630686cc","Type":"ContainerStarted","Data":"49c2536bdde68dc1507406c0e6280d6c8a672a65b01dc41ba5716ededb429a38"} Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.634510 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1c099f46-80ac-4aaf-84e7-d9dd13fb4241","Type":"ContainerStarted","Data":"12c7114232ad25e06e71567799ed974fc31f83a2e04bfc15e2f8da93eb996cb6"} Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.640455 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.642802 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8495b76777-brtjp" event={"ID":"747be938-8771-4bb6-8279-2a1e5607507d","Type":"ContainerDied","Data":"c69d8fc2791741816801a4b7d7818a0e2c58c52fc504c796764783965e82d0d3"} Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.642839 4910 scope.go:117] "RemoveContainer" containerID="8fa704cf48fef15649c22f1fd9f5cfaa6b68013741eb7cdddbda31b989ed68b8" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.645110 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8495b76777-brtjp" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.675176 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-66567888d7-qkxjs" podStartSLOduration=3.6751529830000003 podStartE2EDuration="3.675152983s" podCreationTimestamp="2026-01-05 22:11:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:11:15.655828496 +0000 UTC m=+1207.233326166" watchObservedRunningTime="2026-01-05 22:11:15.675152983 +0000 UTC m=+1207.252650653" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.733226 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.750687 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.806425 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:11:15 crc kubenswrapper[4910]: E0105 22:11:15.807646 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="747be938-8771-4bb6-8279-2a1e5607507d" containerName="init" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.807672 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="747be938-8771-4bb6-8279-2a1e5607507d" containerName="init" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.808014 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="747be938-8771-4bb6-8279-2a1e5607507d" containerName="init" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.811454 4910 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.821537 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.835874 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.881368 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8495b76777-brtjp"] Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.892727 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8495b76777-brtjp"] Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.932866 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.932929 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.932959 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.932989 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.933142 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1baf2afa-a481-4037-b114-68e8691f8486-logs\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.933183 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1baf2afa-a481-4037-b114-68e8691f8486-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:15 crc kubenswrapper[4910]: I0105 22:11:15.933224 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lxkb\" (UniqueName: \"kubernetes.io/projected/1baf2afa-a481-4037-b114-68e8691f8486-kube-api-access-5lxkb\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " 
pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.035086 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1baf2afa-a481-4037-b114-68e8691f8486-logs\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.035202 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1baf2afa-a481-4037-b114-68e8691f8486-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.035240 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5lxkb\" (UniqueName: \"kubernetes.io/projected/1baf2afa-a481-4037-b114-68e8691f8486-kube-api-access-5lxkb\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.035256 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.035300 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.035338 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.035388 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.035668 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1baf2afa-a481-4037-b114-68e8691f8486-logs\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.036727 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.037864 
4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1baf2afa-a481-4037-b114-68e8691f8486-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.047679 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.048895 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.059699 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.065854 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5lxkb\" (UniqueName: \"kubernetes.io/projected/1baf2afa-a481-4037-b114-68e8691f8486-kube-api-access-5lxkb\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.113365 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.167526 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.659035 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-66567888d7-qkxjs" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.742662 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14ac9c02-b3f0-41e5-81da-a3326d5ed403" path="/var/lib/kubelet/pods/14ac9c02-b3f0-41e5-81da-a3326d5ed403/volumes" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.743524 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="747be938-8771-4bb6-8279-2a1e5607507d" path="/var/lib/kubelet/pods/747be938-8771-4bb6-8279-2a1e5607507d/volumes" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.744335 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afd532df-36b6-45cf-94e1-0e7182b938ce" path="/var/lib/kubelet/pods/afd532df-36b6-45cf-94e1-0e7182b938ce/volumes" Jan 05 22:11:16 crc kubenswrapper[4910]: I0105 22:11:16.824355 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:11:17 crc kubenswrapper[4910]: I0105 22:11:17.669828 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1c099f46-80ac-4aaf-84e7-d9dd13fb4241","Type":"ContainerStarted","Data":"61653c9b11f66a5d23311200ddf64b7f858df6520e2da84a235365818b3587b8"} Jan 05 22:11:17 crc kubenswrapper[4910]: I0105 22:11:17.670326 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1c099f46-80ac-4aaf-84e7-d9dd13fb4241","Type":"ContainerStarted","Data":"977c7c1dbb951673e0867086fa50d782c68937cc790b4a541a38e62e64493473"} Jan 05 22:11:17 crc kubenswrapper[4910]: I0105 22:11:17.673503 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1baf2afa-a481-4037-b114-68e8691f8486","Type":"ContainerStarted","Data":"e1a917cc506e884deb60fd75ab7cd04509ff31da01327eda8e501f33638f84bb"} Jan 05 22:11:18 crc kubenswrapper[4910]: I0105 22:11:18.714610 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1baf2afa-a481-4037-b114-68e8691f8486","Type":"ContainerStarted","Data":"3e81889900ecfb81c9033d67992d4ae8127699fac80983153cbb79ebc2f5ca35"} Jan 05 22:11:18 crc kubenswrapper[4910]: I0105 22:11:18.714697 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="1c099f46-80ac-4aaf-84e7-d9dd13fb4241" containerName="glance-log" containerID="cri-o://61653c9b11f66a5d23311200ddf64b7f858df6520e2da84a235365818b3587b8" gracePeriod=30 Jan 05 22:11:18 crc kubenswrapper[4910]: I0105 22:11:18.714782 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="1c099f46-80ac-4aaf-84e7-d9dd13fb4241" containerName="glance-httpd" containerID="cri-o://977c7c1dbb951673e0867086fa50d782c68937cc790b4a541a38e62e64493473" gracePeriod=30 Jan 05 22:11:18 crc kubenswrapper[4910]: I0105 22:11:18.750630 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.750602369 podStartE2EDuration="6.750602369s" podCreationTimestamp="2026-01-05 22:11:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-05 22:11:18.741244653 +0000 UTC m=+1210.318742323" watchObservedRunningTime="2026-01-05 22:11:18.750602369 +0000 UTC m=+1210.328100039" Jan 05 22:11:19 crc kubenswrapper[4910]: I0105 22:11:19.733627 4910 generic.go:334] "Generic (PLEG): container finished" podID="970366e8-fe49-4945-8223-072bf9227a15" containerID="ca4ab0b3e97c3a0c6d935237ab326c6797dbb8d63f989b52bb60164c52dd940e" exitCode=0 Jan 05 22:11:19 crc kubenswrapper[4910]: I0105 22:11:19.733724 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-46j7w" event={"ID":"970366e8-fe49-4945-8223-072bf9227a15","Type":"ContainerDied","Data":"ca4ab0b3e97c3a0c6d935237ab326c6797dbb8d63f989b52bb60164c52dd940e"} Jan 05 22:11:19 crc kubenswrapper[4910]: I0105 22:11:19.739393 4910 generic.go:334] "Generic (PLEG): container finished" podID="1c099f46-80ac-4aaf-84e7-d9dd13fb4241" containerID="977c7c1dbb951673e0867086fa50d782c68937cc790b4a541a38e62e64493473" exitCode=0 Jan 05 22:11:19 crc kubenswrapper[4910]: I0105 22:11:19.739432 4910 generic.go:334] "Generic (PLEG): container finished" podID="1c099f46-80ac-4aaf-84e7-d9dd13fb4241" containerID="61653c9b11f66a5d23311200ddf64b7f858df6520e2da84a235365818b3587b8" exitCode=143 Jan 05 22:11:19 crc kubenswrapper[4910]: I0105 22:11:19.739460 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1c099f46-80ac-4aaf-84e7-d9dd13fb4241","Type":"ContainerDied","Data":"977c7c1dbb951673e0867086fa50d782c68937cc790b4a541a38e62e64493473"} Jan 05 22:11:19 crc kubenswrapper[4910]: I0105 22:11:19.739494 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1c099f46-80ac-4aaf-84e7-d9dd13fb4241","Type":"ContainerDied","Data":"61653c9b11f66a5d23311200ddf64b7f858df6520e2da84a235365818b3587b8"} Jan 05 22:11:21 crc kubenswrapper[4910]: I0105 22:11:21.414960 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:11:23 crc kubenswrapper[4910]: I0105 22:11:23.284380 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-66567888d7-qkxjs" Jan 05 22:11:23 crc kubenswrapper[4910]: I0105 22:11:23.386051 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75bdffd66f-fxgpr"] Jan 05 22:11:23 crc kubenswrapper[4910]: I0105 22:11:23.386383 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" podUID="853c3a83-badd-474e-b356-5034158f9450" containerName="dnsmasq-dns" containerID="cri-o://ad0ae345e35fac61af32e0cff6a542a82c46c5fb3aaedcc6e1de4b8d81337d80" gracePeriod=10 Jan 05 22:11:24 crc kubenswrapper[4910]: I0105 22:11:24.793899 4910 generic.go:334] "Generic (PLEG): container finished" podID="853c3a83-badd-474e-b356-5034158f9450" containerID="ad0ae345e35fac61af32e0cff6a542a82c46c5fb3aaedcc6e1de4b8d81337d80" exitCode=0 Jan 05 22:11:24 crc kubenswrapper[4910]: I0105 22:11:24.793998 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" event={"ID":"853c3a83-badd-474e-b356-5034158f9450","Type":"ContainerDied","Data":"ad0ae345e35fac61af32e0cff6a542a82c46c5fb3aaedcc6e1de4b8d81337d80"} Jan 05 22:11:26 crc kubenswrapper[4910]: I0105 22:11:26.390773 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" podUID="853c3a83-badd-474e-b356-5034158f9450" containerName="dnsmasq-dns" 
probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Jan 05 22:11:31 crc kubenswrapper[4910]: I0105 22:11:31.389992 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" podUID="853c3a83-badd-474e-b356-5034158f9450" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Jan 05 22:11:36 crc kubenswrapper[4910]: I0105 22:11:36.389702 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" podUID="853c3a83-badd-474e-b356-5034158f9450" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Jan 05 22:11:36 crc kubenswrapper[4910]: I0105 22:11:36.390247 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.324000 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.331858 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.410471 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-credential-keys\") pod \"970366e8-fe49-4945-8223-072bf9227a15\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.425166 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "970366e8-fe49-4945-8223-072bf9227a15" (UID: "970366e8-fe49-4945-8223-072bf9227a15"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.511842 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-httpd-run\") pod \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.511898 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5d9j\" (UniqueName: \"kubernetes.io/projected/970366e8-fe49-4945-8223-072bf9227a15-kube-api-access-x5d9j\") pod \"970366e8-fe49-4945-8223-072bf9227a15\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.511935 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-fernet-keys\") pod \"970366e8-fe49-4945-8223-072bf9227a15\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.511967 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-scripts\") pod \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.512262 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gl54k\" (UniqueName: \"kubernetes.io/projected/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-kube-api-access-gl54k\") pod \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.512297 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-config-data\") pod \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.512377 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.512441 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-combined-ca-bundle\") pod \"970366e8-fe49-4945-8223-072bf9227a15\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.512460 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-combined-ca-bundle\") pod \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.512484 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-logs\") pod \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\" (UID: \"1c099f46-80ac-4aaf-84e7-d9dd13fb4241\") " Jan 05 22:11:37 crc 
kubenswrapper[4910]: I0105 22:11:37.512518 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-scripts\") pod \"970366e8-fe49-4945-8223-072bf9227a15\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.512550 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-config-data\") pod \"970366e8-fe49-4945-8223-072bf9227a15\" (UID: \"970366e8-fe49-4945-8223-072bf9227a15\") " Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.512903 4910 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.513282 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-logs" (OuterVolumeSpecName: "logs") pod "1c099f46-80ac-4aaf-84e7-d9dd13fb4241" (UID: "1c099f46-80ac-4aaf-84e7-d9dd13fb4241"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.513733 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "1c099f46-80ac-4aaf-84e7-d9dd13fb4241" (UID: "1c099f46-80ac-4aaf-84e7-d9dd13fb4241"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.517990 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-scripts" (OuterVolumeSpecName: "scripts") pod "1c099f46-80ac-4aaf-84e7-d9dd13fb4241" (UID: "1c099f46-80ac-4aaf-84e7-d9dd13fb4241"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.517987 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "970366e8-fe49-4945-8223-072bf9227a15" (UID: "970366e8-fe49-4945-8223-072bf9227a15"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.518435 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "1c099f46-80ac-4aaf-84e7-d9dd13fb4241" (UID: "1c099f46-80ac-4aaf-84e7-d9dd13fb4241"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.519305 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-kube-api-access-gl54k" (OuterVolumeSpecName: "kube-api-access-gl54k") pod "1c099f46-80ac-4aaf-84e7-d9dd13fb4241" (UID: "1c099f46-80ac-4aaf-84e7-d9dd13fb4241"). InnerVolumeSpecName "kube-api-access-gl54k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.519678 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/970366e8-fe49-4945-8223-072bf9227a15-kube-api-access-x5d9j" (OuterVolumeSpecName: "kube-api-access-x5d9j") pod "970366e8-fe49-4945-8223-072bf9227a15" (UID: "970366e8-fe49-4945-8223-072bf9227a15"). InnerVolumeSpecName "kube-api-access-x5d9j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.533033 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-scripts" (OuterVolumeSpecName: "scripts") pod "970366e8-fe49-4945-8223-072bf9227a15" (UID: "970366e8-fe49-4945-8223-072bf9227a15"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.540409 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1c099f46-80ac-4aaf-84e7-d9dd13fb4241" (UID: "1c099f46-80ac-4aaf-84e7-d9dd13fb4241"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.546244 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "970366e8-fe49-4945-8223-072bf9227a15" (UID: "970366e8-fe49-4945-8223-072bf9227a15"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.560150 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-config-data" (OuterVolumeSpecName: "config-data") pod "970366e8-fe49-4945-8223-072bf9227a15" (UID: "970366e8-fe49-4945-8223-072bf9227a15"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.566007 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-config-data" (OuterVolumeSpecName: "config-data") pod "1c099f46-80ac-4aaf-84e7-d9dd13fb4241" (UID: "1c099f46-80ac-4aaf-84e7-d9dd13fb4241"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.614590 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gl54k\" (UniqueName: \"kubernetes.io/projected/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-kube-api-access-gl54k\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.614634 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.614678 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.614691 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.614725 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.614738 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.614751 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.614762 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.614772 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.614784 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5d9j\" (UniqueName: \"kubernetes.io/projected/970366e8-fe49-4945-8223-072bf9227a15-kube-api-access-x5d9j\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.614794 4910 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/970366e8-fe49-4945-8223-072bf9227a15-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.614804 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c099f46-80ac-4aaf-84e7-d9dd13fb4241-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.637082 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.716206 4910 reconciler_common.go:293] "Volume detached for volume 
\"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:37 crc kubenswrapper[4910]: E0105 22:11:37.817706 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:fe32d3ea620f0c7ecfdde9bbf28417fde03bc18c6f60b1408fa8da24d8188f16" Jan 05 22:11:37 crc kubenswrapper[4910]: E0105 22:11:37.818308 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:fe32d3ea620f0c7ecfdde9bbf28417fde03bc18c6f60b1408fa8da24d8188f16,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mzjv2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-ktvmp_openstack(b94e459d-172c-41ca-a38c-384a5f3e323e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 05 22:11:37 crc kubenswrapper[4910]: E0105 22:11:37.820223 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-ktvmp" podUID="b94e459d-172c-41ca-a38c-384a5f3e323e" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.948715 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1c099f46-80ac-4aaf-84e7-d9dd13fb4241","Type":"ContainerDied","Data":"12c7114232ad25e06e71567799ed974fc31f83a2e04bfc15e2f8da93eb996cb6"} Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.948751 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.948802 4910 scope.go:117] "RemoveContainer" containerID="977c7c1dbb951673e0867086fa50d782c68937cc790b4a541a38e62e64493473" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.951753 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-46j7w" event={"ID":"970366e8-fe49-4945-8223-072bf9227a15","Type":"ContainerDied","Data":"e55891592eecad8d1174d74b0f0fd65bfeb9973b8bf9865f74d40c16d77d9f9d"} Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.951920 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e55891592eecad8d1174d74b0f0fd65bfeb9973b8bf9865f74d40c16d77d9f9d" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.951785 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-46j7w" Jan 05 22:11:37 crc kubenswrapper[4910]: E0105 22:11:37.953520 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:fe32d3ea620f0c7ecfdde9bbf28417fde03bc18c6f60b1408fa8da24d8188f16\\\"\"" pod="openstack/barbican-db-sync-ktvmp" podUID="b94e459d-172c-41ca-a38c-384a5f3e323e" Jan 05 22:11:37 crc kubenswrapper[4910]: I0105 22:11:37.994759 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.008839 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.025048 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 22:11:38 crc kubenswrapper[4910]: E0105 22:11:38.025706 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c099f46-80ac-4aaf-84e7-d9dd13fb4241" containerName="glance-log" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.025727 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c099f46-80ac-4aaf-84e7-d9dd13fb4241" containerName="glance-log" Jan 05 22:11:38 crc kubenswrapper[4910]: E0105 22:11:38.025739 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="970366e8-fe49-4945-8223-072bf9227a15" containerName="keystone-bootstrap" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.025747 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="970366e8-fe49-4945-8223-072bf9227a15" containerName="keystone-bootstrap" Jan 05 22:11:38 crc kubenswrapper[4910]: E0105 22:11:38.025770 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c099f46-80ac-4aaf-84e7-d9dd13fb4241" containerName="glance-httpd" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.025777 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c099f46-80ac-4aaf-84e7-d9dd13fb4241" containerName="glance-httpd" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.025953 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c099f46-80ac-4aaf-84e7-d9dd13fb4241" containerName="glance-log" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.025972 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="970366e8-fe49-4945-8223-072bf9227a15" containerName="keystone-bootstrap" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.025985 
4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c099f46-80ac-4aaf-84e7-d9dd13fb4241" containerName="glance-httpd" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.027031 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.030342 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.030652 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.043023 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.225760 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.225872 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.225960 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a1e131f-00cf-4724-91e0-52d2766184d9-logs\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.226013 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22jt6\" (UniqueName: \"kubernetes.io/projected/3a1e131f-00cf-4724-91e0-52d2766184d9-kube-api-access-22jt6\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.226215 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-scripts\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.226295 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a1e131f-00cf-4724-91e0-52d2766184d9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.226348 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-config-data\") pod 
\"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.226590 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: E0105 22:11:38.234921 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:5a548c25fe3d02f7a042cb0a6d28fc8039a34c4a3b3d07aadda4aba3a926e777" Jan 05 22:11:38 crc kubenswrapper[4910]: E0105 22:11:38.235107 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:5a548c25fe3d02f7a042cb0a6d28fc8039a34c4a3b3d07aadda4aba3a926e777,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n64chb4h68dhcdh564hb5h67ch584hbh664h66ch5c4h89h686hfh6dh58dh7dh5d4h596hf8h5ffh5c9h99hfdhc6h56dh597h56h644h65fh64fq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z9wxw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(c38271dc-8b9a-4bb0-a8bc-2fc78c641aac): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" 
logger="UnhandledError" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.329312 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.329411 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.329459 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a1e131f-00cf-4724-91e0-52d2766184d9-logs\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.330199 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22jt6\" (UniqueName: \"kubernetes.io/projected/3a1e131f-00cf-4724-91e0-52d2766184d9-kube-api-access-22jt6\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.330251 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-scripts\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.330279 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a1e131f-00cf-4724-91e0-52d2766184d9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.330299 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-config-data\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.330328 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.330697 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a1e131f-00cf-4724-91e0-52d2766184d9-logs\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.330961 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a1e131f-00cf-4724-91e0-52d2766184d9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.331302 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.336552 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.336823 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-scripts\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.336847 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-config-data\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.338601 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.358321 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22jt6\" (UniqueName: \"kubernetes.io/projected/3a1e131f-00cf-4724-91e0-52d2766184d9-kube-api-access-22jt6\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.369912 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.465891 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-46j7w"] Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.480310 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-46j7w"] Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.556352 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-gs27h"] Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.557853 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.560837 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.560884 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.560939 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.561050 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-t5hmz" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.561226 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.564792 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gs27h"] Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.657675 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-combined-ca-bundle\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.657740 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-config-data\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.657822 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-credential-keys\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.657855 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqvwn\" (UniqueName: \"kubernetes.io/projected/80f364a3-6407-463e-9565-a3bb43cb1494-kube-api-access-jqvwn\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.657904 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-fernet-keys\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.658018 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-scripts\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.660539 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.737848 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c099f46-80ac-4aaf-84e7-d9dd13fb4241" path="/var/lib/kubelet/pods/1c099f46-80ac-4aaf-84e7-d9dd13fb4241/volumes" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.738578 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="970366e8-fe49-4945-8223-072bf9227a15" path="/var/lib/kubelet/pods/970366e8-fe49-4945-8223-072bf9227a15/volumes" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.758942 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-scripts\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.759028 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-combined-ca-bundle\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.759053 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-config-data\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.759104 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-credential-keys\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.759163 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqvwn\" (UniqueName: \"kubernetes.io/projected/80f364a3-6407-463e-9565-a3bb43cb1494-kube-api-access-jqvwn\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.759246 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-fernet-keys\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.764966 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-combined-ca-bundle\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.765629 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-fernet-keys\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " 
pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.766084 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-credential-keys\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.767803 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-config-data\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.776811 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-scripts\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.779805 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqvwn\" (UniqueName: \"kubernetes.io/projected/80f364a3-6407-463e-9565-a3bb43cb1494-kube-api-access-jqvwn\") pod \"keystone-bootstrap-gs27h\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:38 crc kubenswrapper[4910]: I0105 22:11:38.886074 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:39 crc kubenswrapper[4910]: E0105 22:11:39.666083 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49" Jan 05 22:11:39 crc kubenswrapper[4910]: E0105 22:11:39.667152 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fxjfv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-jn5f9_openstack(726618d0-e442-410e-87df-33bca2cf52a4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 05 22:11:39 crc kubenswrapper[4910]: E0105 22:11:39.668376 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-jn5f9" podUID="726618d0-e442-410e-87df-33bca2cf52a4" Jan 05 22:11:39 crc kubenswrapper[4910]: I0105 22:11:39.681922 4910 scope.go:117] "RemoveContainer" containerID="61653c9b11f66a5d23311200ddf64b7f858df6520e2da84a235365818b3587b8" Jan 05 22:11:39 crc kubenswrapper[4910]: I0105 22:11:39.885732 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:11:39 crc kubenswrapper[4910]: I0105 22:11:39.982418 4910 generic.go:334] "Generic (PLEG): container finished" podID="b5a1f57c-578a-4396-95b1-e09d6ac92383" containerID="f0973ba8f153769879aba399ae8990d5d2d00746e48c5db9253cff5df721d6f9" exitCode=0 Jan 05 22:11:39 crc kubenswrapper[4910]: I0105 22:11:39.982466 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4765c" event={"ID":"b5a1f57c-578a-4396-95b1-e09d6ac92383","Type":"ContainerDied","Data":"f0973ba8f153769879aba399ae8990d5d2d00746e48c5db9253cff5df721d6f9"} Jan 05 22:11:39 crc kubenswrapper[4910]: I0105 22:11:39.986491 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-ovsdbserver-sb\") pod \"853c3a83-badd-474e-b356-5034158f9450\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " Jan 05 22:11:39 crc kubenswrapper[4910]: I0105 22:11:39.986580 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-dns-swift-storage-0\") pod \"853c3a83-badd-474e-b356-5034158f9450\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " Jan 05 22:11:39 crc kubenswrapper[4910]: I0105 22:11:39.986653 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-dns-svc\") pod \"853c3a83-badd-474e-b356-5034158f9450\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " Jan 05 22:11:39 crc kubenswrapper[4910]: I0105 22:11:39.986698 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kkdlg\" (UniqueName: \"kubernetes.io/projected/853c3a83-badd-474e-b356-5034158f9450-kube-api-access-kkdlg\") pod \"853c3a83-badd-474e-b356-5034158f9450\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " Jan 05 22:11:39 crc kubenswrapper[4910]: I0105 22:11:39.986742 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-ovsdbserver-nb\") pod \"853c3a83-badd-474e-b356-5034158f9450\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " Jan 05 22:11:39 crc kubenswrapper[4910]: I0105 22:11:39.986832 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-config\") pod \"853c3a83-badd-474e-b356-5034158f9450\" (UID: \"853c3a83-badd-474e-b356-5034158f9450\") " Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.000944 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.000962 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bdffd66f-fxgpr" event={"ID":"853c3a83-badd-474e-b356-5034158f9450","Type":"ContainerDied","Data":"c4160d50beed7062923fa7e16ab70e929ae39e2f667380d6a560d16e5ccdb0d1"} Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.001137 4910 scope.go:117] "RemoveContainer" containerID="ad0ae345e35fac61af32e0cff6a542a82c46c5fb3aaedcc6e1de4b8d81337d80" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.002439 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/853c3a83-badd-474e-b356-5034158f9450-kube-api-access-kkdlg" (OuterVolumeSpecName: "kube-api-access-kkdlg") pod "853c3a83-badd-474e-b356-5034158f9450" (UID: "853c3a83-badd-474e-b356-5034158f9450"). InnerVolumeSpecName "kube-api-access-kkdlg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:11:40 crc kubenswrapper[4910]: E0105 22:11:40.018366 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49\\\"\"" pod="openstack/cinder-db-sync-jn5f9" podUID="726618d0-e442-410e-87df-33bca2cf52a4" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.048921 4910 scope.go:117] "RemoveContainer" containerID="bfc5218769d16b681f150c0c77305c6aec81a9a558d8bbba578d9bb05efbd380" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.063080 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "853c3a83-badd-474e-b356-5034158f9450" (UID: "853c3a83-badd-474e-b356-5034158f9450"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.066378 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "853c3a83-badd-474e-b356-5034158f9450" (UID: "853c3a83-badd-474e-b356-5034158f9450"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.073349 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "853c3a83-badd-474e-b356-5034158f9450" (UID: "853c3a83-badd-474e-b356-5034158f9450"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.078926 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "853c3a83-badd-474e-b356-5034158f9450" (UID: "853c3a83-badd-474e-b356-5034158f9450"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.084688 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-config" (OuterVolumeSpecName: "config") pod "853c3a83-badd-474e-b356-5034158f9450" (UID: "853c3a83-badd-474e-b356-5034158f9450"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.090915 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.090969 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.090982 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kkdlg\" (UniqueName: \"kubernetes.io/projected/853c3a83-badd-474e-b356-5034158f9450-kube-api-access-kkdlg\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.090994 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.091005 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.091013 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/853c3a83-badd-474e-b356-5034158f9450-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.197563 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gs27h"] Jan 05 22:11:40 crc kubenswrapper[4910]: W0105 22:11:40.209266 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod80f364a3_6407_463e_9565_a3bb43cb1494.slice/crio-f0c06368cd11636a0a0fcf972e28b9d6b83443c0d955322abc32dd1796e71bf8 WatchSource:0}: Error finding container f0c06368cd11636a0a0fcf972e28b9d6b83443c0d955322abc32dd1796e71bf8: Status 404 returned error can't find the container with id f0c06368cd11636a0a0fcf972e28b9d6b83443c0d955322abc32dd1796e71bf8 Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.329496 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.346762 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75bdffd66f-fxgpr"] Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.352544 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75bdffd66f-fxgpr"] Jan 05 22:11:40 crc kubenswrapper[4910]: W0105 22:11:40.683487 4910 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a1e131f_00cf_4724_91e0_52d2766184d9.slice/crio-da8afd18292c1ea76d28e6453baf0e350eca22f2d44109a0756e9815b4251d31 WatchSource:0}: Error finding container da8afd18292c1ea76d28e6453baf0e350eca22f2d44109a0756e9815b4251d31: Status 404 returned error can't find the container with id da8afd18292c1ea76d28e6453baf0e350eca22f2d44109a0756e9815b4251d31 Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.739241 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="853c3a83-badd-474e-b356-5034158f9450" path="/var/lib/kubelet/pods/853c3a83-badd-474e-b356-5034158f9450/volumes" Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.952197 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:11:40 crc kubenswrapper[4910]: I0105 22:11:40.952266 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.058684 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gs27h" event={"ID":"80f364a3-6407-463e-9565-a3bb43cb1494","Type":"ContainerStarted","Data":"57959d13503bb849c6442bdf3d6f0c3ed65925527985e5fdecd977b087aed4b3"} Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.059217 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gs27h" event={"ID":"80f364a3-6407-463e-9565-a3bb43cb1494","Type":"ContainerStarted","Data":"f0c06368cd11636a0a0fcf972e28b9d6b83443c0d955322abc32dd1796e71bf8"} Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.061553 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-dgttl" event={"ID":"4432a67a-7276-4f55-838d-b685529581d5","Type":"ContainerStarted","Data":"5966fa5734c3ed51558c3d877b4b5b45ed7556cf861a0f54ae98be8bd16f7a20"} Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.068023 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3a1e131f-00cf-4724-91e0-52d2766184d9","Type":"ContainerStarted","Data":"da8afd18292c1ea76d28e6453baf0e350eca22f2d44109a0756e9815b4251d31"} Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.071059 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="1baf2afa-a481-4037-b114-68e8691f8486" containerName="glance-log" containerID="cri-o://3e81889900ecfb81c9033d67992d4ae8127699fac80983153cbb79ebc2f5ca35" gracePeriod=30 Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.071213 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1baf2afa-a481-4037-b114-68e8691f8486","Type":"ContainerStarted","Data":"0ae7c587a980de588b6677ace00effe4ef8fa3405de094d1d448897e51af76fc"} Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.071298 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" 
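[Editor's note] The Liveness failure above is kubelet's HTTP prober getting connection-refused from the machine-config-daemon health endpoint. Roughly, the check is a timed GET where any transport error or a status outside 200–399 fails the probe; a minimal sketch of an equivalent check in Go:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func probeOnce(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "connect: connection refused", as logged above
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// Endpoint taken from the log entry; expected to fail while the
	// machine-config-daemon is not listening.
	if err := probeOnce("http://127.0.0.1:8798/health", 5*time.Second); err != nil {
		fmt.Println("Probe failed:", err)
	}
}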
podUID="1baf2afa-a481-4037-b114-68e8691f8486" containerName="glance-httpd" containerID="cri-o://0ae7c587a980de588b6677ace00effe4ef8fa3405de094d1d448897e51af76fc" gracePeriod=30 Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.094425 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-gs27h" podStartSLOduration=3.094400983 podStartE2EDuration="3.094400983s" podCreationTimestamp="2026-01-05 22:11:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:11:41.083360977 +0000 UTC m=+1232.660858647" watchObservedRunningTime="2026-01-05 22:11:41.094400983 +0000 UTC m=+1232.671898653" Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.141066 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=26.141039671 podStartE2EDuration="26.141039671s" podCreationTimestamp="2026-01-05 22:11:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:11:41.111481931 +0000 UTC m=+1232.688979601" watchObservedRunningTime="2026-01-05 22:11:41.141039671 +0000 UTC m=+1232.718537341" Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.171880 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-dgttl" podStartSLOduration=5.475121075 podStartE2EDuration="30.171855863s" podCreationTimestamp="2026-01-05 22:11:11 +0000 UTC" firstStartedPulling="2026-01-05 22:11:13.104767758 +0000 UTC m=+1204.682265428" lastFinishedPulling="2026-01-05 22:11:37.801502546 +0000 UTC m=+1229.379000216" observedRunningTime="2026-01-05 22:11:41.151724729 +0000 UTC m=+1232.729222419" watchObservedRunningTime="2026-01-05 22:11:41.171855863 +0000 UTC m=+1232.749353533" Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.509986 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4765c" Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.636197 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n8mlr\" (UniqueName: \"kubernetes.io/projected/b5a1f57c-578a-4396-95b1-e09d6ac92383-kube-api-access-n8mlr\") pod \"b5a1f57c-578a-4396-95b1-e09d6ac92383\" (UID: \"b5a1f57c-578a-4396-95b1-e09d6ac92383\") " Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.636509 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5a1f57c-578a-4396-95b1-e09d6ac92383-config\") pod \"b5a1f57c-578a-4396-95b1-e09d6ac92383\" (UID: \"b5a1f57c-578a-4396-95b1-e09d6ac92383\") " Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.636573 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5a1f57c-578a-4396-95b1-e09d6ac92383-combined-ca-bundle\") pod \"b5a1f57c-578a-4396-95b1-e09d6ac92383\" (UID: \"b5a1f57c-578a-4396-95b1-e09d6ac92383\") " Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.641179 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5a1f57c-578a-4396-95b1-e09d6ac92383-kube-api-access-n8mlr" (OuterVolumeSpecName: "kube-api-access-n8mlr") pod "b5a1f57c-578a-4396-95b1-e09d6ac92383" (UID: "b5a1f57c-578a-4396-95b1-e09d6ac92383"). 
InnerVolumeSpecName "kube-api-access-n8mlr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.666356 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5a1f57c-578a-4396-95b1-e09d6ac92383-config" (OuterVolumeSpecName: "config") pod "b5a1f57c-578a-4396-95b1-e09d6ac92383" (UID: "b5a1f57c-578a-4396-95b1-e09d6ac92383"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.668321 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5a1f57c-578a-4396-95b1-e09d6ac92383-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5a1f57c-578a-4396-95b1-e09d6ac92383" (UID: "b5a1f57c-578a-4396-95b1-e09d6ac92383"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.738973 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5a1f57c-578a-4396-95b1-e09d6ac92383-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.739087 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5a1f57c-578a-4396-95b1-e09d6ac92383-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:41 crc kubenswrapper[4910]: I0105 22:11:41.739104 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n8mlr\" (UniqueName: \"kubernetes.io/projected/b5a1f57c-578a-4396-95b1-e09d6ac92383-kube-api-access-n8mlr\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.127214 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4765c" event={"ID":"b5a1f57c-578a-4396-95b1-e09d6ac92383","Type":"ContainerDied","Data":"69254fbc641112c4b31c47925e9625849e1913994d0de8336371e41b5b2e7077"} Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.127281 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69254fbc641112c4b31c47925e9625849e1913994d0de8336371e41b5b2e7077" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.127401 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-4765c" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.133300 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3a1e131f-00cf-4724-91e0-52d2766184d9","Type":"ContainerStarted","Data":"46d06fca16e6d7362ccaa51a4b5864275f1634fd78eaaef4118ec61bdc8a1f46"} Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.157321 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac","Type":"ContainerStarted","Data":"be213fc6c3c4cd181483e6f8d5f930cbfb3f3f94dff00ab7b6b2c04364f113bb"} Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.169936 4910 generic.go:334] "Generic (PLEG): container finished" podID="1baf2afa-a481-4037-b114-68e8691f8486" containerID="0ae7c587a980de588b6677ace00effe4ef8fa3405de094d1d448897e51af76fc" exitCode=0 Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.169981 4910 generic.go:334] "Generic (PLEG): container finished" podID="1baf2afa-a481-4037-b114-68e8691f8486" containerID="3e81889900ecfb81c9033d67992d4ae8127699fac80983153cbb79ebc2f5ca35" exitCode=143 Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.170042 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1baf2afa-a481-4037-b114-68e8691f8486","Type":"ContainerDied","Data":"0ae7c587a980de588b6677ace00effe4ef8fa3405de094d1d448897e51af76fc"} Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.170135 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1baf2afa-a481-4037-b114-68e8691f8486","Type":"ContainerDied","Data":"3e81889900ecfb81c9033d67992d4ae8127699fac80983153cbb79ebc2f5ca35"} Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.180033 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.251492 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-5lz66"] Jan 05 22:11:42 crc kubenswrapper[4910]: E0105 22:11:42.252248 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1baf2afa-a481-4037-b114-68e8691f8486" containerName="glance-log" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.252271 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1baf2afa-a481-4037-b114-68e8691f8486" containerName="glance-log" Jan 05 22:11:42 crc kubenswrapper[4910]: E0105 22:11:42.252298 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1baf2afa-a481-4037-b114-68e8691f8486" containerName="glance-httpd" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.252307 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1baf2afa-a481-4037-b114-68e8691f8486" containerName="glance-httpd" Jan 05 22:11:42 crc kubenswrapper[4910]: E0105 22:11:42.252341 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="853c3a83-badd-474e-b356-5034158f9450" containerName="dnsmasq-dns" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.252350 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="853c3a83-badd-474e-b356-5034158f9450" containerName="dnsmasq-dns" Jan 05 22:11:42 crc kubenswrapper[4910]: E0105 22:11:42.252360 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="853c3a83-badd-474e-b356-5034158f9450" containerName="init" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.252367 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="853c3a83-badd-474e-b356-5034158f9450" containerName="init" Jan 05 22:11:42 crc kubenswrapper[4910]: E0105 22:11:42.252383 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5a1f57c-578a-4396-95b1-e09d6ac92383" containerName="neutron-db-sync" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.252391 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5a1f57c-578a-4396-95b1-e09d6ac92383" containerName="neutron-db-sync" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.252621 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1baf2afa-a481-4037-b114-68e8691f8486" containerName="glance-httpd" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.252642 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5a1f57c-578a-4396-95b1-e09d6ac92383" containerName="neutron-db-sync" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.252658 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1baf2afa-a481-4037-b114-68e8691f8486" containerName="glance-log" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.252669 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="853c3a83-badd-474e-b356-5034158f9450" containerName="dnsmasq-dns" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.253776 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.254992 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1baf2afa-a481-4037-b114-68e8691f8486-httpd-run\") pod \"1baf2afa-a481-4037-b114-68e8691f8486\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.255033 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-combined-ca-bundle\") pod \"1baf2afa-a481-4037-b114-68e8691f8486\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.255154 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-scripts\") pod \"1baf2afa-a481-4037-b114-68e8691f8486\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.256849 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1baf2afa-a481-4037-b114-68e8691f8486-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "1baf2afa-a481-4037-b114-68e8691f8486" (UID: "1baf2afa-a481-4037-b114-68e8691f8486"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.262305 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"1baf2afa-a481-4037-b114-68e8691f8486\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.262414 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-config-data\") pod \"1baf2afa-a481-4037-b114-68e8691f8486\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.262506 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5lxkb\" (UniqueName: \"kubernetes.io/projected/1baf2afa-a481-4037-b114-68e8691f8486-kube-api-access-5lxkb\") pod \"1baf2afa-a481-4037-b114-68e8691f8486\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.262558 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1baf2afa-a481-4037-b114-68e8691f8486-logs\") pod \"1baf2afa-a481-4037-b114-68e8691f8486\" (UID: \"1baf2afa-a481-4037-b114-68e8691f8486\") " Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.263345 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1baf2afa-a481-4037-b114-68e8691f8486-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.272573 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1baf2afa-a481-4037-b114-68e8691f8486-logs" (OuterVolumeSpecName: "logs") pod "1baf2afa-a481-4037-b114-68e8691f8486" (UID: "1baf2afa-a481-4037-b114-68e8691f8486"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.284139 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "1baf2afa-a481-4037-b114-68e8691f8486" (UID: "1baf2afa-a481-4037-b114-68e8691f8486"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.284337 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1baf2afa-a481-4037-b114-68e8691f8486-kube-api-access-5lxkb" (OuterVolumeSpecName: "kube-api-access-5lxkb") pod "1baf2afa-a481-4037-b114-68e8691f8486" (UID: "1baf2afa-a481-4037-b114-68e8691f8486"). InnerVolumeSpecName "kube-api-access-5lxkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.284595 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-scripts" (OuterVolumeSpecName: "scripts") pod "1baf2afa-a481-4037-b114-68e8691f8486" (UID: "1baf2afa-a481-4037-b114-68e8691f8486"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.310352 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-5lz66"] Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.353346 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1baf2afa-a481-4037-b114-68e8691f8486" (UID: "1baf2afa-a481-4037-b114-68e8691f8486"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.366635 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qwlh\" (UniqueName: \"kubernetes.io/projected/b4174802-d96e-4046-8155-b22de9fa615f-kube-api-access-9qwlh\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.366682 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-config\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.366708 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-dns-swift-storage-0\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.366761 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-dns-svc\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.366793 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-ovsdbserver-sb\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.366828 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-ovsdbserver-nb\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.366912 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5lxkb\" (UniqueName: \"kubernetes.io/projected/1baf2afa-a481-4037-b114-68e8691f8486-kube-api-access-5lxkb\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.366923 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1baf2afa-a481-4037-b114-68e8691f8486-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.366935 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.366942 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-scripts\") on node \"crc\" DevicePath \"\"" Jan 
05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.366962 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.388220 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.434390 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-config-data" (OuterVolumeSpecName: "config-data") pod "1baf2afa-a481-4037-b114-68e8691f8486" (UID: "1baf2afa-a481-4037-b114-68e8691f8486"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.479552 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qwlh\" (UniqueName: \"kubernetes.io/projected/b4174802-d96e-4046-8155-b22de9fa615f-kube-api-access-9qwlh\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.479603 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-config\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.479624 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-dns-swift-storage-0\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.479667 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-dns-svc\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.479693 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-ovsdbserver-sb\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.479722 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-ovsdbserver-nb\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.479772 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.479794 4910 
reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1baf2afa-a481-4037-b114-68e8691f8486-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.480632 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-ovsdbserver-nb\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.481458 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-config\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.481964 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-dns-swift-storage-0\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.482728 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-dns-svc\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.483392 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-ovsdbserver-sb\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.507374 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qwlh\" (UniqueName: \"kubernetes.io/projected/b4174802-d96e-4046-8155-b22de9fa615f-kube-api-access-9qwlh\") pod \"dnsmasq-dns-7bb67c87c9-5lz66\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.509603 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5c84f4b854-d9fkv"] Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.511949 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.518250 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-9hgph" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.518310 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.518255 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.518596 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.529027 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c84f4b854-d9fkv"] Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.581399 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-httpd-config\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.581935 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-combined-ca-bundle\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.582048 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-ovndb-tls-certs\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.582543 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-config\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.582788 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kd4xf\" (UniqueName: \"kubernetes.io/projected/07d78534-34ad-40ac-963e-605d72b91c82-kube-api-access-kd4xf\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.684916 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-config\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.685358 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kd4xf\" (UniqueName: \"kubernetes.io/projected/07d78534-34ad-40ac-963e-605d72b91c82-kube-api-access-kd4xf\") pod \"neutron-5c84f4b854-d9fkv\" (UID: 
\"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.685432 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-httpd-config\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.685458 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-combined-ca-bundle\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.685491 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-ovndb-tls-certs\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.690750 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-config\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.691791 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-ovndb-tls-certs\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.693187 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-httpd-config\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.696623 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-combined-ca-bundle\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.717010 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kd4xf\" (UniqueName: \"kubernetes.io/projected/07d78534-34ad-40ac-963e-605d72b91c82-kube-api-access-kd4xf\") pod \"neutron-5c84f4b854-d9fkv\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.758851 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:42 crc kubenswrapper[4910]: I0105 22:11:42.867234 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.202090 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1baf2afa-a481-4037-b114-68e8691f8486","Type":"ContainerDied","Data":"e1a917cc506e884deb60fd75ab7cd04509ff31da01327eda8e501f33638f84bb"} Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.202531 4910 scope.go:117] "RemoveContainer" containerID="0ae7c587a980de588b6677ace00effe4ef8fa3405de094d1d448897e51af76fc" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.202728 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.214949 4910 generic.go:334] "Generic (PLEG): container finished" podID="4432a67a-7276-4f55-838d-b685529581d5" containerID="5966fa5734c3ed51558c3d877b4b5b45ed7556cf861a0f54ae98be8bd16f7a20" exitCode=0 Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.215036 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-dgttl" event={"ID":"4432a67a-7276-4f55-838d-b685529581d5","Type":"ContainerDied","Data":"5966fa5734c3ed51558c3d877b4b5b45ed7556cf861a0f54ae98be8bd16f7a20"} Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.225358 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3a1e131f-00cf-4724-91e0-52d2766184d9","Type":"ContainerStarted","Data":"4bb93c94da9335681c7dcfe1904cc909f089d4f34d01f8611101a36972f9245d"} Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.260571 4910 scope.go:117] "RemoveContainer" containerID="3e81889900ecfb81c9033d67992d4ae8127699fac80983153cbb79ebc2f5ca35" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.273054 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.290355 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.307196 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.309160 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.312812 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.313003 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.343551 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.348643 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.348615085 podStartE2EDuration="6.348615085s" podCreationTimestamp="2026-01-05 22:11:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:11:43.292560731 +0000 UTC m=+1234.870058401" watchObservedRunningTime="2026-01-05 22:11:43.348615085 +0000 UTC m=+1234.926112755" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.361497 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-5lz66"] Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.406224 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.406699 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.406739 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfrnt\" (UniqueName: \"kubernetes.io/projected/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-kube-api-access-cfrnt\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.406766 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-logs\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.406817 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.406865 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.406900 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.406977 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.508261 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.508332 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.508367 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfrnt\" (UniqueName: \"kubernetes.io/projected/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-kube-api-access-cfrnt\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.508393 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-logs\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.508433 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.508471 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.508505 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.508554 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.509168 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.509447 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-logs\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.510033 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.518427 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.521963 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.523466 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.529213 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.582884 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfrnt\" (UniqueName: \"kubernetes.io/projected/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-kube-api-access-cfrnt\") pod \"glance-default-internal-api-0\" (UID: 
\"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.657289 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.772911 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c84f4b854-d9fkv"] Jan 05 22:11:43 crc kubenswrapper[4910]: W0105 22:11:43.793632 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07d78534_34ad_40ac_963e_605d72b91c82.slice/crio-05ee7aff1e282fd692216dff37c458555df5a52be809c14f544c92b8b3ae9cf0 WatchSource:0}: Error finding container 05ee7aff1e282fd692216dff37c458555df5a52be809c14f544c92b8b3ae9cf0: Status 404 returned error can't find the container with id 05ee7aff1e282fd692216dff37c458555df5a52be809c14f544c92b8b3ae9cf0 Jan 05 22:11:43 crc kubenswrapper[4910]: I0105 22:11:43.941419 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.246578 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c84f4b854-d9fkv" event={"ID":"07d78534-34ad-40ac-963e-605d72b91c82","Type":"ContainerStarted","Data":"ea7d4a42975da37a49580a32164311257ae5b524f07e7d50b91f1491db9df89f"} Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.247100 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c84f4b854-d9fkv" event={"ID":"07d78534-34ad-40ac-963e-605d72b91c82","Type":"ContainerStarted","Data":"05ee7aff1e282fd692216dff37c458555df5a52be809c14f544c92b8b3ae9cf0"} Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.258441 4910 generic.go:334] "Generic (PLEG): container finished" podID="b4174802-d96e-4046-8155-b22de9fa615f" containerID="f57bdcc6c7bf04d10da5300531991fee00308faf910cbfac9ee7f4252d9907dc" exitCode=0 Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.258566 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" event={"ID":"b4174802-d96e-4046-8155-b22de9fa615f","Type":"ContainerDied","Data":"f57bdcc6c7bf04d10da5300531991fee00308faf910cbfac9ee7f4252d9907dc"} Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.258677 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" event={"ID":"b4174802-d96e-4046-8155-b22de9fa615f","Type":"ContainerStarted","Data":"cd9d9b719a1ff22f45f64ecf4babd7d6b3f568a59c29630240af7004f3e00e71"} Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.390313 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.588365 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.740198 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1baf2afa-a481-4037-b114-68e8691f8486" path="/var/lib/kubelet/pods/1baf2afa-a481-4037-b114-68e8691f8486/volumes" Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.761542 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-combined-ca-bundle\") pod \"4432a67a-7276-4f55-838d-b685529581d5\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.761638 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dn5qm\" (UniqueName: \"kubernetes.io/projected/4432a67a-7276-4f55-838d-b685529581d5-kube-api-access-dn5qm\") pod \"4432a67a-7276-4f55-838d-b685529581d5\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.761695 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-config-data\") pod \"4432a67a-7276-4f55-838d-b685529581d5\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.761746 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-scripts\") pod \"4432a67a-7276-4f55-838d-b685529581d5\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.761791 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4432a67a-7276-4f55-838d-b685529581d5-logs\") pod \"4432a67a-7276-4f55-838d-b685529581d5\" (UID: \"4432a67a-7276-4f55-838d-b685529581d5\") " Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.763569 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4432a67a-7276-4f55-838d-b685529581d5-logs" (OuterVolumeSpecName: "logs") pod "4432a67a-7276-4f55-838d-b685529581d5" (UID: "4432a67a-7276-4f55-838d-b685529581d5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.784472 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4432a67a-7276-4f55-838d-b685529581d5-kube-api-access-dn5qm" (OuterVolumeSpecName: "kube-api-access-dn5qm") pod "4432a67a-7276-4f55-838d-b685529581d5" (UID: "4432a67a-7276-4f55-838d-b685529581d5"). InnerVolumeSpecName "kube-api-access-dn5qm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.785829 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-scripts" (OuterVolumeSpecName: "scripts") pod "4432a67a-7276-4f55-838d-b685529581d5" (UID: "4432a67a-7276-4f55-838d-b685529581d5"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.806448 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-config-data" (OuterVolumeSpecName: "config-data") pod "4432a67a-7276-4f55-838d-b685529581d5" (UID: "4432a67a-7276-4f55-838d-b685529581d5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.806465 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4432a67a-7276-4f55-838d-b685529581d5" (UID: "4432a67a-7276-4f55-838d-b685529581d5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.864595 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.864633 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dn5qm\" (UniqueName: \"kubernetes.io/projected/4432a67a-7276-4f55-838d-b685529581d5-kube-api-access-dn5qm\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.864649 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.864658 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4432a67a-7276-4f55-838d-b685529581d5-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:44 crc kubenswrapper[4910]: I0105 22:11:44.864670 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4432a67a-7276-4f55-838d-b685529581d5-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.138914 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6c69d8c8f7-7w2gb"] Jan 05 22:11:45 crc kubenswrapper[4910]: E0105 22:11:45.141012 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4432a67a-7276-4f55-838d-b685529581d5" containerName="placement-db-sync" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.141036 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4432a67a-7276-4f55-838d-b685529581d5" containerName="placement-db-sync" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.141268 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4432a67a-7276-4f55-838d-b685529581d5" containerName="placement-db-sync" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.142374 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.146640 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.147058 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.162275 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6c69d8c8f7-7w2gb"] Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.277281 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-config\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.277602 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-public-tls-certs\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.277643 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-httpd-config\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.277684 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftrb5\" (UniqueName: \"kubernetes.io/projected/227b48c0-2e23-4048-8fb5-21628bd9e5e0-kube-api-access-ftrb5\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.277723 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-ovndb-tls-certs\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.277904 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-internal-tls-certs\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.277958 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-combined-ca-bundle\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.286450 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" 
event={"ID":"b4174802-d96e-4046-8155-b22de9fa615f","Type":"ContainerStarted","Data":"4166741f153128dd443f3316d4b42e92c05a8452c6b7fa423468d7f4f83e62f3"} Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.287182 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.297054 4910 generic.go:334] "Generic (PLEG): container finished" podID="80f364a3-6407-463e-9565-a3bb43cb1494" containerID="57959d13503bb849c6442bdf3d6f0c3ed65925527985e5fdecd977b087aed4b3" exitCode=0 Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.297189 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gs27h" event={"ID":"80f364a3-6407-463e-9565-a3bb43cb1494","Type":"ContainerDied","Data":"57959d13503bb849c6442bdf3d6f0c3ed65925527985e5fdecd977b087aed4b3"} Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.313714 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c84f4b854-d9fkv" event={"ID":"07d78534-34ad-40ac-963e-605d72b91c82","Type":"ContainerStarted","Data":"973f13fb83ce05be7e926666b1043d1de211a5830b1ca48cc71cad4a7b2cb684"} Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.314349 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.317729 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-dgttl" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.317736 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-dgttl" event={"ID":"4432a67a-7276-4f55-838d-b685529581d5","Type":"ContainerDied","Data":"79f2d9e26f3b3806a7a81cd7b8365ff74ac06a75fd3803b4d82a48e4240dbf32"} Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.317806 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79f2d9e26f3b3806a7a81cd7b8365ff74ac06a75fd3803b4d82a48e4240dbf32" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.320902 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d5e3749f-8afb-49a4-b1e0-a46951b4ddee","Type":"ContainerStarted","Data":"592ffa71e930006aebe23469038788c9c804dabb4a613441b6665be044bb977f"} Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.320952 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d5e3749f-8afb-49a4-b1e0-a46951b4ddee","Type":"ContainerStarted","Data":"26139540e0d40c6ac9a618cca43f1558d5fb3abed6400dc6940b3d78b8054ceb"} Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.329501 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" podStartSLOduration=3.3294795600000002 podStartE2EDuration="3.32947956s" podCreationTimestamp="2026-01-05 22:11:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:11:45.321836668 +0000 UTC m=+1236.899334348" watchObservedRunningTime="2026-01-05 22:11:45.32947956 +0000 UTC m=+1236.906977230" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.368472 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5c84f4b854-d9fkv" podStartSLOduration=3.368451556 podStartE2EDuration="3.368451556s" 
podCreationTimestamp="2026-01-05 22:11:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:11:45.366346983 +0000 UTC m=+1236.943844653" watchObservedRunningTime="2026-01-05 22:11:45.368451556 +0000 UTC m=+1236.945949226" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.379373 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-config\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.379450 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-public-tls-certs\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.379478 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-httpd-config\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.379507 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftrb5\" (UniqueName: \"kubernetes.io/projected/227b48c0-2e23-4048-8fb5-21628bd9e5e0-kube-api-access-ftrb5\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.379537 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-ovndb-tls-certs\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.379620 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-internal-tls-certs\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.379653 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-combined-ca-bundle\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.384975 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-combined-ca-bundle\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.385264 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-ovndb-tls-certs\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.385968 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-public-tls-certs\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.388556 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-internal-tls-certs\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.389540 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-httpd-config\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.389553 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-config\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.400460 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftrb5\" (UniqueName: \"kubernetes.io/projected/227b48c0-2e23-4048-8fb5-21628bd9e5e0-kube-api-access-ftrb5\") pod \"neutron-6c69d8c8f7-7w2gb\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.453733 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-7687b85c5d-l8k6w"] Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.457101 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.459956 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.462375 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-7vmps" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.463599 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.463785 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.465045 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.476393 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7687b85c5d-l8k6w"] Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.479655 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.595321 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-combined-ca-bundle\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.595403 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-scripts\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.595458 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b29bf6bd-079e-4e8b-bec6-49d4923676af-logs\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.595478 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-public-tls-certs\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.595521 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-internal-tls-certs\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.595559 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrztf\" (UniqueName: \"kubernetes.io/projected/b29bf6bd-079e-4e8b-bec6-49d4923676af-kube-api-access-vrztf\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.595641 
4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-config-data\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.696872 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b29bf6bd-079e-4e8b-bec6-49d4923676af-logs\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.697280 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-public-tls-certs\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.697342 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-internal-tls-certs\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.697371 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrztf\" (UniqueName: \"kubernetes.io/projected/b29bf6bd-079e-4e8b-bec6-49d4923676af-kube-api-access-vrztf\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.697453 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-config-data\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.697511 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-combined-ca-bundle\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.697560 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-scripts\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.698778 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b29bf6bd-079e-4e8b-bec6-49d4923676af-logs\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.709477 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-config-data\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.711193 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-combined-ca-bundle\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.714731 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrztf\" (UniqueName: \"kubernetes.io/projected/b29bf6bd-079e-4e8b-bec6-49d4923676af-kube-api-access-vrztf\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.763472 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-scripts\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.763886 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-public-tls-certs\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.765175 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-internal-tls-certs\") pod \"placement-7687b85c5d-l8k6w\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:45 crc kubenswrapper[4910]: I0105 22:11:45.799727 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:46 crc kubenswrapper[4910]: I0105 22:11:46.161468 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6c69d8c8f7-7w2gb"] Jan 05 22:11:46 crc kubenswrapper[4910]: I0105 22:11:46.356051 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d5e3749f-8afb-49a4-b1e0-a46951b4ddee","Type":"ContainerStarted","Data":"948b179b1d2f802a2ca4c0b12b0d6fda3e792316c100f83b47f7519be94b41d6"} Jan 05 22:11:46 crc kubenswrapper[4910]: I0105 22:11:46.417676 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.41765079 podStartE2EDuration="3.41765079s" podCreationTimestamp="2026-01-05 22:11:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:11:46.394446919 +0000 UTC m=+1237.971944589" watchObservedRunningTime="2026-01-05 22:11:46.41765079 +0000 UTC m=+1237.995148460" Jan 05 22:11:47 crc kubenswrapper[4910]: I0105 22:11:47.857187 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:47 crc kubenswrapper[4910]: I0105 22:11:47.958531 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-fernet-keys\") pod \"80f364a3-6407-463e-9565-a3bb43cb1494\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " Jan 05 22:11:47 crc kubenswrapper[4910]: I0105 22:11:47.958603 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-combined-ca-bundle\") pod \"80f364a3-6407-463e-9565-a3bb43cb1494\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " Jan 05 22:11:47 crc kubenswrapper[4910]: I0105 22:11:47.958663 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-credential-keys\") pod \"80f364a3-6407-463e-9565-a3bb43cb1494\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " Jan 05 22:11:47 crc kubenswrapper[4910]: I0105 22:11:47.958873 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqvwn\" (UniqueName: \"kubernetes.io/projected/80f364a3-6407-463e-9565-a3bb43cb1494-kube-api-access-jqvwn\") pod \"80f364a3-6407-463e-9565-a3bb43cb1494\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " Jan 05 22:11:47 crc kubenswrapper[4910]: I0105 22:11:47.958903 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-config-data\") pod \"80f364a3-6407-463e-9565-a3bb43cb1494\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " Jan 05 22:11:47 crc kubenswrapper[4910]: I0105 22:11:47.959001 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-scripts\") pod \"80f364a3-6407-463e-9565-a3bb43cb1494\" (UID: \"80f364a3-6407-463e-9565-a3bb43cb1494\") " Jan 05 22:11:47 crc kubenswrapper[4910]: I0105 22:11:47.966336 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-scripts" (OuterVolumeSpecName: "scripts") pod "80f364a3-6407-463e-9565-a3bb43cb1494" (UID: "80f364a3-6407-463e-9565-a3bb43cb1494"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:47 crc kubenswrapper[4910]: I0105 22:11:47.967601 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80f364a3-6407-463e-9565-a3bb43cb1494-kube-api-access-jqvwn" (OuterVolumeSpecName: "kube-api-access-jqvwn") pod "80f364a3-6407-463e-9565-a3bb43cb1494" (UID: "80f364a3-6407-463e-9565-a3bb43cb1494"). InnerVolumeSpecName "kube-api-access-jqvwn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:11:47 crc kubenswrapper[4910]: I0105 22:11:47.967630 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "80f364a3-6407-463e-9565-a3bb43cb1494" (UID: "80f364a3-6407-463e-9565-a3bb43cb1494"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:47 crc kubenswrapper[4910]: I0105 22:11:47.971220 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "80f364a3-6407-463e-9565-a3bb43cb1494" (UID: "80f364a3-6407-463e-9565-a3bb43cb1494"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:47 crc kubenswrapper[4910]: I0105 22:11:47.987508 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "80f364a3-6407-463e-9565-a3bb43cb1494" (UID: "80f364a3-6407-463e-9565-a3bb43cb1494"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:47 crc kubenswrapper[4910]: I0105 22:11:47.992946 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-config-data" (OuterVolumeSpecName: "config-data") pod "80f364a3-6407-463e-9565-a3bb43cb1494" (UID: "80f364a3-6407-463e-9565-a3bb43cb1494"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.061821 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.061863 4910 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.061873 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.061884 4910 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.061895 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqvwn\" (UniqueName: \"kubernetes.io/projected/80f364a3-6407-463e-9565-a3bb43cb1494-kube-api-access-jqvwn\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.061903 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80f364a3-6407-463e-9565-a3bb43cb1494-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.373663 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gs27h" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.373664 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gs27h" event={"ID":"80f364a3-6407-463e-9565-a3bb43cb1494","Type":"ContainerDied","Data":"f0c06368cd11636a0a0fcf972e28b9d6b83443c0d955322abc32dd1796e71bf8"} Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.373709 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0c06368cd11636a0a0fcf972e28b9d6b83443c0d955322abc32dd1796e71bf8" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.375268 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c69d8c8f7-7w2gb" event={"ID":"227b48c0-2e23-4048-8fb5-21628bd9e5e0","Type":"ContainerStarted","Data":"d99c8f190fc33bc92e2cfd7b4ba8802c8d2aee488a17c92d2a53677ceb742870"} Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.661747 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.662298 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.697050 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.707516 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.972535 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7bbfdb8fcf-zlpw8"] Jan 05 22:11:48 crc kubenswrapper[4910]: E0105 22:11:48.973134 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80f364a3-6407-463e-9565-a3bb43cb1494" containerName="keystone-bootstrap" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.973151 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="80f364a3-6407-463e-9565-a3bb43cb1494" containerName="keystone-bootstrap" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.973380 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="80f364a3-6407-463e-9565-a3bb43cb1494" containerName="keystone-bootstrap" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.974283 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.976378 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.976703 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.976928 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.977241 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.978401 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.978570 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-t5hmz" Jan 05 22:11:48 crc kubenswrapper[4910]: I0105 22:11:48.988330 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7bbfdb8fcf-zlpw8"] Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.090394 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-credential-keys\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.090467 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-scripts\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.090564 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-public-tls-certs\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.090672 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-internal-tls-certs\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.090829 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-combined-ca-bundle\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.090890 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-fernet-keys\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: 
\"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.091085 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-config-data\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.091280 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbxgh\" (UniqueName: \"kubernetes.io/projected/97c873ec-c28a-4121-bac2-98b49c6b42a0-kube-api-access-vbxgh\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.193804 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-credential-keys\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.193893 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-scripts\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.193925 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-public-tls-certs\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.193956 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-internal-tls-certs\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.194018 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-combined-ca-bundle\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.194049 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-fernet-keys\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.194092 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-config-data\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 
05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.194145 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbxgh\" (UniqueName: \"kubernetes.io/projected/97c873ec-c28a-4121-bac2-98b49c6b42a0-kube-api-access-vbxgh\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.200917 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-fernet-keys\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.202974 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-credential-keys\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.203543 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-internal-tls-certs\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.204484 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-public-tls-certs\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.205223 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-scripts\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.206584 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-config-data\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.213152 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-combined-ca-bundle\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.216678 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbxgh\" (UniqueName: \"kubernetes.io/projected/97c873ec-c28a-4121-bac2-98b49c6b42a0-kube-api-access-vbxgh\") pod \"keystone-7bbfdb8fcf-zlpw8\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") " pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.310866 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.385617 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 05 22:11:49 crc kubenswrapper[4910]: I0105 22:11:49.385890 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 05 22:11:50 crc kubenswrapper[4910]: I0105 22:11:50.602225 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7bbfdb8fcf-zlpw8"] Jan 05 22:11:50 crc kubenswrapper[4910]: I0105 22:11:50.738433 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7687b85c5d-l8k6w"] Jan 05 22:11:50 crc kubenswrapper[4910]: W0105 22:11:50.746128 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb29bf6bd_079e_4e8b_bec6_49d4923676af.slice/crio-506be26d44cefd5ac4e779619bdc514c3a2aceb203aa2c9a18aa1caf4694818a WatchSource:0}: Error finding container 506be26d44cefd5ac4e779619bdc514c3a2aceb203aa2c9a18aa1caf4694818a: Status 404 returned error can't find the container with id 506be26d44cefd5ac4e779619bdc514c3a2aceb203aa2c9a18aa1caf4694818a Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.405048 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7687b85c5d-l8k6w" event={"ID":"b29bf6bd-079e-4e8b-bec6-49d4923676af","Type":"ContainerStarted","Data":"008ff3c44ce49caf6caea7aa9f55cfc608a8d5e702630f035b8953f4de51ddc1"} Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.405632 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7687b85c5d-l8k6w" event={"ID":"b29bf6bd-079e-4e8b-bec6-49d4923676af","Type":"ContainerStarted","Data":"5f896af4ce5feef15b4dba2b2abb97a685fc637f2ec21e921db5a1f857688437"} Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.405648 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7687b85c5d-l8k6w" event={"ID":"b29bf6bd-079e-4e8b-bec6-49d4923676af","Type":"ContainerStarted","Data":"506be26d44cefd5ac4e779619bdc514c3a2aceb203aa2c9a18aa1caf4694818a"} Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.409947 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac","Type":"ContainerStarted","Data":"9db8d840f3a91c8db05c8717dfd0f5740fcebb85ae0ce45d04bc51eaa83dfc45"} Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.413011 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c69d8c8f7-7w2gb" event={"ID":"227b48c0-2e23-4048-8fb5-21628bd9e5e0","Type":"ContainerStarted","Data":"0ce63635905b4359223cc707716af9867aeeb87e2e260750761f5c1bca381777"} Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.413044 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c69d8c8f7-7w2gb" event={"ID":"227b48c0-2e23-4048-8fb5-21628bd9e5e0","Type":"ContainerStarted","Data":"5e9cd39ea8845a5fd2c6e7c0fe1c864ac551845861a02b8b20ce5e8da8cd01fb"} Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.415264 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.425636 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.425664 4910 prober_manager.go:312] 
"Failed to trigger a manual run" probe="Readiness" Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.426557 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7bbfdb8fcf-zlpw8" event={"ID":"97c873ec-c28a-4121-bac2-98b49c6b42a0","Type":"ContainerStarted","Data":"4e8b2fc70196427c5c99643640fbe7135d80de9a670ca3af9c02eb288b8aa7e3"} Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.426591 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.426603 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7bbfdb8fcf-zlpw8" event={"ID":"97c873ec-c28a-4121-bac2-98b49c6b42a0","Type":"ContainerStarted","Data":"c6b1e392bb89b0aa402e5d93ba7298e8ab8df4208ab11d5d3207690e4b81280a"} Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.451104 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6c69d8c8f7-7w2gb" podStartSLOduration=6.451085759 podStartE2EDuration="6.451085759s" podCreationTimestamp="2026-01-05 22:11:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:11:51.444536615 +0000 UTC m=+1243.022034315" watchObservedRunningTime="2026-01-05 22:11:51.451085759 +0000 UTC m=+1243.028583429" Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.479309 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7bbfdb8fcf-zlpw8" podStartSLOduration=3.479288405 podStartE2EDuration="3.479288405s" podCreationTimestamp="2026-01-05 22:11:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:11:51.47189939 +0000 UTC m=+1243.049397060" watchObservedRunningTime="2026-01-05 22:11:51.479288405 +0000 UTC m=+1243.056786075" Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.744322 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 05 22:11:51 crc kubenswrapper[4910]: I0105 22:11:51.897553 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 05 22:11:52 crc kubenswrapper[4910]: I0105 22:11:52.436040 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:52 crc kubenswrapper[4910]: I0105 22:11:52.436175 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:11:52 crc kubenswrapper[4910]: I0105 22:11:52.462044 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-7687b85c5d-l8k6w" podStartSLOduration=7.462024115 podStartE2EDuration="7.462024115s" podCreationTimestamp="2026-01-05 22:11:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:11:52.454071236 +0000 UTC m=+1244.031568906" watchObservedRunningTime="2026-01-05 22:11:52.462024115 +0000 UTC m=+1244.039521785" Jan 05 22:11:52 crc kubenswrapper[4910]: I0105 22:11:52.760326 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:11:52 crc kubenswrapper[4910]: I0105 22:11:52.821241 4910 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/dnsmasq-dns-66567888d7-qkxjs"] Jan 05 22:11:52 crc kubenswrapper[4910]: I0105 22:11:52.821486 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-66567888d7-qkxjs" podUID="be9110f6-9a1a-4e05-8e41-0eb8630686cc" containerName="dnsmasq-dns" containerID="cri-o://49c2536bdde68dc1507406c0e6280d6c8a672a65b01dc41ba5716ededb429a38" gracePeriod=10 Jan 05 22:11:53 crc kubenswrapper[4910]: I0105 22:11:53.282453 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-66567888d7-qkxjs" podUID="be9110f6-9a1a-4e05-8e41-0eb8630686cc" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.143:5353: connect: connection refused" Jan 05 22:11:53 crc kubenswrapper[4910]: I0105 22:11:53.444491 4910 generic.go:334] "Generic (PLEG): container finished" podID="be9110f6-9a1a-4e05-8e41-0eb8630686cc" containerID="49c2536bdde68dc1507406c0e6280d6c8a672a65b01dc41ba5716ededb429a38" exitCode=0 Jan 05 22:11:53 crc kubenswrapper[4910]: I0105 22:11:53.444576 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-qkxjs" event={"ID":"be9110f6-9a1a-4e05-8e41-0eb8630686cc","Type":"ContainerDied","Data":"49c2536bdde68dc1507406c0e6280d6c8a672a65b01dc41ba5716ededb429a38"} Jan 05 22:11:53 crc kubenswrapper[4910]: I0105 22:11:53.942313 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:53 crc kubenswrapper[4910]: I0105 22:11:53.942361 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:53 crc kubenswrapper[4910]: I0105 22:11:53.975920 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:53 crc kubenswrapper[4910]: I0105 22:11:53.988563 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.460536 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.460861 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.558286 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66567888d7-qkxjs" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.616048 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-ovsdbserver-nb\") pod \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.616170 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-dns-svc\") pod \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.616226 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-config\") pod \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.616445 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6k9lx\" (UniqueName: \"kubernetes.io/projected/be9110f6-9a1a-4e05-8e41-0eb8630686cc-kube-api-access-6k9lx\") pod \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.616544 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-dns-swift-storage-0\") pod \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.616581 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-ovsdbserver-sb\") pod \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\" (UID: \"be9110f6-9a1a-4e05-8e41-0eb8630686cc\") " Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.638621 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be9110f6-9a1a-4e05-8e41-0eb8630686cc-kube-api-access-6k9lx" (OuterVolumeSpecName: "kube-api-access-6k9lx") pod "be9110f6-9a1a-4e05-8e41-0eb8630686cc" (UID: "be9110f6-9a1a-4e05-8e41-0eb8630686cc"). InnerVolumeSpecName "kube-api-access-6k9lx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.705525 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "be9110f6-9a1a-4e05-8e41-0eb8630686cc" (UID: "be9110f6-9a1a-4e05-8e41-0eb8630686cc"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.719039 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6k9lx\" (UniqueName: \"kubernetes.io/projected/be9110f6-9a1a-4e05-8e41-0eb8630686cc-kube-api-access-6k9lx\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.719076 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.728819 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "be9110f6-9a1a-4e05-8e41-0eb8630686cc" (UID: "be9110f6-9a1a-4e05-8e41-0eb8630686cc"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.738623 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "be9110f6-9a1a-4e05-8e41-0eb8630686cc" (UID: "be9110f6-9a1a-4e05-8e41-0eb8630686cc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.752395 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "be9110f6-9a1a-4e05-8e41-0eb8630686cc" (UID: "be9110f6-9a1a-4e05-8e41-0eb8630686cc"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.769036 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-config" (OuterVolumeSpecName: "config") pod "be9110f6-9a1a-4e05-8e41-0eb8630686cc" (UID: "be9110f6-9a1a-4e05-8e41-0eb8630686cc"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.821927 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.823008 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.823026 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:54 crc kubenswrapper[4910]: I0105 22:11:54.823038 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be9110f6-9a1a-4e05-8e41-0eb8630686cc-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:11:55 crc kubenswrapper[4910]: I0105 22:11:55.469377 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-ktvmp" event={"ID":"b94e459d-172c-41ca-a38c-384a5f3e323e","Type":"ContainerStarted","Data":"44506650ad4a40575aa61344dc4d96523c508c48166a6845b6b1d2011a42b387"} Jan 05 22:11:55 crc kubenswrapper[4910]: I0105 22:11:55.472147 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66567888d7-qkxjs" Jan 05 22:11:55 crc kubenswrapper[4910]: I0105 22:11:55.472212 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-qkxjs" event={"ID":"be9110f6-9a1a-4e05-8e41-0eb8630686cc","Type":"ContainerDied","Data":"a68c70ce4003a93a43b379ebfb09a5850d12eef770fbe9e304a207f2ac9308a2"} Jan 05 22:11:55 crc kubenswrapper[4910]: I0105 22:11:55.472265 4910 scope.go:117] "RemoveContainer" containerID="49c2536bdde68dc1507406c0e6280d6c8a672a65b01dc41ba5716ededb429a38" Jan 05 22:11:55 crc kubenswrapper[4910]: I0105 22:11:55.492163 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-ktvmp" podStartSLOduration=3.866190431 podStartE2EDuration="44.492142636s" podCreationTimestamp="2026-01-05 22:11:11 +0000 UTC" firstStartedPulling="2026-01-05 22:11:13.598860365 +0000 UTC m=+1205.176358025" lastFinishedPulling="2026-01-05 22:11:54.22481256 +0000 UTC m=+1245.802310230" observedRunningTime="2026-01-05 22:11:55.486364592 +0000 UTC m=+1247.063862312" watchObservedRunningTime="2026-01-05 22:11:55.492142636 +0000 UTC m=+1247.069640306" Jan 05 22:11:55 crc kubenswrapper[4910]: I0105 22:11:55.510700 4910 scope.go:117] "RemoveContainer" containerID="5c0ede15b426840cab327d372b81755dfcc50901e0f9e3eeed4e5f6148d82e00" Jan 05 22:11:55 crc kubenswrapper[4910]: I0105 22:11:55.512071 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-qkxjs"] Jan 05 22:11:55 crc kubenswrapper[4910]: I0105 22:11:55.520519 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-qkxjs"] Jan 05 22:11:56 crc kubenswrapper[4910]: I0105 22:11:56.483312 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 05 22:11:56 crc kubenswrapper[4910]: I0105 22:11:56.484047 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 05 22:11:56 crc kubenswrapper[4910]: I0105 
22:11:56.484932 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jn5f9" event={"ID":"726618d0-e442-410e-87df-33bca2cf52a4","Type":"ContainerStarted","Data":"9e5bad872d1ceb46b26c5dc21dec8556316d8cada45123a1f5bd1c291685e9f4"} Jan 05 22:11:56 crc kubenswrapper[4910]: I0105 22:11:56.502964 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-jn5f9" podStartSLOduration=4.043807042 podStartE2EDuration="46.502939909s" podCreationTimestamp="2026-01-05 22:11:10 +0000 UTC" firstStartedPulling="2026-01-05 22:11:11.967188539 +0000 UTC m=+1203.544686209" lastFinishedPulling="2026-01-05 22:11:54.426321406 +0000 UTC m=+1246.003819076" observedRunningTime="2026-01-05 22:11:56.498494138 +0000 UTC m=+1248.075991798" watchObservedRunningTime="2026-01-05 22:11:56.502939909 +0000 UTC m=+1248.080437579" Jan 05 22:11:56 crc kubenswrapper[4910]: I0105 22:11:56.638822 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:56 crc kubenswrapper[4910]: I0105 22:11:56.663197 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 05 22:11:56 crc kubenswrapper[4910]: I0105 22:11:56.738289 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be9110f6-9a1a-4e05-8e41-0eb8630686cc" path="/var/lib/kubelet/pods/be9110f6-9a1a-4e05-8e41-0eb8630686cc/volumes" Jan 05 22:11:58 crc kubenswrapper[4910]: I0105 22:11:58.530444 4910 generic.go:334] "Generic (PLEG): container finished" podID="b94e459d-172c-41ca-a38c-384a5f3e323e" containerID="44506650ad4a40575aa61344dc4d96523c508c48166a6845b6b1d2011a42b387" exitCode=0 Jan 05 22:11:58 crc kubenswrapper[4910]: I0105 22:11:58.530608 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-ktvmp" event={"ID":"b94e459d-172c-41ca-a38c-384a5f3e323e","Type":"ContainerDied","Data":"44506650ad4a40575aa61344dc4d96523c508c48166a6845b6b1d2011a42b387"} Jan 05 22:12:01 crc kubenswrapper[4910]: I0105 22:12:01.650823 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-ktvmp" Jan 05 22:12:01 crc kubenswrapper[4910]: I0105 22:12:01.756599 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b94e459d-172c-41ca-a38c-384a5f3e323e-db-sync-config-data\") pod \"b94e459d-172c-41ca-a38c-384a5f3e323e\" (UID: \"b94e459d-172c-41ca-a38c-384a5f3e323e\") " Jan 05 22:12:01 crc kubenswrapper[4910]: I0105 22:12:01.756809 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzjv2\" (UniqueName: \"kubernetes.io/projected/b94e459d-172c-41ca-a38c-384a5f3e323e-kube-api-access-mzjv2\") pod \"b94e459d-172c-41ca-a38c-384a5f3e323e\" (UID: \"b94e459d-172c-41ca-a38c-384a5f3e323e\") " Jan 05 22:12:01 crc kubenswrapper[4910]: I0105 22:12:01.756891 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b94e459d-172c-41ca-a38c-384a5f3e323e-combined-ca-bundle\") pod \"b94e459d-172c-41ca-a38c-384a5f3e323e\" (UID: \"b94e459d-172c-41ca-a38c-384a5f3e323e\") " Jan 05 22:12:01 crc kubenswrapper[4910]: I0105 22:12:01.763358 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b94e459d-172c-41ca-a38c-384a5f3e323e-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b94e459d-172c-41ca-a38c-384a5f3e323e" (UID: "b94e459d-172c-41ca-a38c-384a5f3e323e"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:01 crc kubenswrapper[4910]: I0105 22:12:01.764129 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b94e459d-172c-41ca-a38c-384a5f3e323e-kube-api-access-mzjv2" (OuterVolumeSpecName: "kube-api-access-mzjv2") pod "b94e459d-172c-41ca-a38c-384a5f3e323e" (UID: "b94e459d-172c-41ca-a38c-384a5f3e323e"). InnerVolumeSpecName "kube-api-access-mzjv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:01 crc kubenswrapper[4910]: I0105 22:12:01.789264 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b94e459d-172c-41ca-a38c-384a5f3e323e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b94e459d-172c-41ca-a38c-384a5f3e323e" (UID: "b94e459d-172c-41ca-a38c-384a5f3e323e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:01 crc kubenswrapper[4910]: I0105 22:12:01.859457 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzjv2\" (UniqueName: \"kubernetes.io/projected/b94e459d-172c-41ca-a38c-384a5f3e323e-kube-api-access-mzjv2\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:01 crc kubenswrapper[4910]: I0105 22:12:01.859491 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b94e459d-172c-41ca-a38c-384a5f3e323e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:01 crc kubenswrapper[4910]: I0105 22:12:01.859500 4910 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b94e459d-172c-41ca-a38c-384a5f3e323e-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:02 crc kubenswrapper[4910]: E0105 22:12:02.322474 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.570324 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-ktvmp" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.570314 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-ktvmp" event={"ID":"b94e459d-172c-41ca-a38c-384a5f3e323e","Type":"ContainerDied","Data":"81fbf4382632658ac6489c62b331ba64f07d423a45af65c462acb27a2b0bf043"} Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.570977 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81fbf4382632658ac6489c62b331ba64f07d423a45af65c462acb27a2b0bf043" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.573032 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac","Type":"ContainerStarted","Data":"2e60f99295c06fb62dd6ce701a2c4469eb0953a491479b5e292cfd784ef83bab"} Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.573310 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerName="ceilometer-notification-agent" containerID="cri-o://be213fc6c3c4cd181483e6f8d5f930cbfb3f3f94dff00ab7b6b2c04364f113bb" gracePeriod=30 Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.573403 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.573399 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerName="proxy-httpd" containerID="cri-o://2e60f99295c06fb62dd6ce701a2c4469eb0953a491479b5e292cfd784ef83bab" gracePeriod=30 Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.573471 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerName="sg-core" containerID="cri-o://9db8d840f3a91c8db05c8717dfd0f5740fcebb85ae0ce45d04bc51eaa83dfc45" gracePeriod=30 Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.976818 4910 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/barbican-worker-66897dc6c-9tqxs"] Jan 05 22:12:02 crc kubenswrapper[4910]: E0105 22:12:02.977480 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be9110f6-9a1a-4e05-8e41-0eb8630686cc" containerName="init" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.977505 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="be9110f6-9a1a-4e05-8e41-0eb8630686cc" containerName="init" Jan 05 22:12:02 crc kubenswrapper[4910]: E0105 22:12:02.977548 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be9110f6-9a1a-4e05-8e41-0eb8630686cc" containerName="dnsmasq-dns" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.977559 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="be9110f6-9a1a-4e05-8e41-0eb8630686cc" containerName="dnsmasq-dns" Jan 05 22:12:02 crc kubenswrapper[4910]: E0105 22:12:02.977611 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b94e459d-172c-41ca-a38c-384a5f3e323e" containerName="barbican-db-sync" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.977626 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b94e459d-172c-41ca-a38c-384a5f3e323e" containerName="barbican-db-sync" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.977861 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b94e459d-172c-41ca-a38c-384a5f3e323e" containerName="barbican-db-sync" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.977876 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="be9110f6-9a1a-4e05-8e41-0eb8630686cc" containerName="dnsmasq-dns" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.978986 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.983615 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.983896 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-rsg6c" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.984076 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 05 22:12:02 crc kubenswrapper[4910]: I0105 22:12:02.996786 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-66897dc6c-9tqxs"] Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.007909 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-78b74ccb54-wvrcf"] Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.009894 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.012423 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.023617 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-78b74ccb54-wvrcf"] Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.081246 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-config-data\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.081305 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-config-data-custom\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.081382 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-combined-ca-bundle\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.081444 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drhf4\" (UniqueName: \"kubernetes.io/projected/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-kube-api-access-drhf4\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.081468 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-logs\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.140995 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-hrqn4"] Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.142918 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.163026 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-hrqn4"] Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.185246 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drhf4\" (UniqueName: \"kubernetes.io/projected/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-kube-api-access-drhf4\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.185335 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-logs\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.185372 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-config-data\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.185402 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-config-data-custom\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.185427 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xssxr\" (UniqueName: \"kubernetes.io/projected/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-kube-api-access-xssxr\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.185445 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-combined-ca-bundle\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.185486 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-config-data\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.185522 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-config-data-custom\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " 
pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.185541 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-logs\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.185586 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-combined-ca-bundle\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.186700 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-logs\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.192701 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-config-data\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.196887 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-combined-ca-bundle\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.197788 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-config-data-custom\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.212953 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drhf4\" (UniqueName: \"kubernetes.io/projected/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-kube-api-access-drhf4\") pod \"barbican-worker-66897dc6c-9tqxs\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") " pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.245066 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-69bd4bbbcd-dc6q7"] Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.247599 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.250842 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.262898 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-69bd4bbbcd-dc6q7"] Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.287589 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-config\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.287635 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-logs\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.287672 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-dns-svc\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.287705 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmbc6\" (UniqueName: \"kubernetes.io/projected/169a4bd0-220f-4da1-8182-debab448bd90-kube-api-access-gmbc6\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.287780 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-config-data\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.287797 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-dns-swift-storage-0\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.287815 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-ovsdbserver-nb\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.287834 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-config-data-custom\") pod 
\"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.287855 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-combined-ca-bundle\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.287870 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xssxr\" (UniqueName: \"kubernetes.io/projected/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-kube-api-access-xssxr\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.287896 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-ovsdbserver-sb\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.290080 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-logs\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.292862 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-combined-ca-bundle\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.293596 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-config-data-custom\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.294189 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-config-data\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.305736 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.310601 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xssxr\" (UniqueName: \"kubernetes.io/projected/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-kube-api-access-xssxr\") pod \"barbican-keystone-listener-78b74ccb54-wvrcf\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.340011 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.390712 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-config\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.390778 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-config-data\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.390813 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-config-data-custom\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.390849 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-dns-svc\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.390884 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dschh\" (UniqueName: \"kubernetes.io/projected/b9143cb8-ef34-42b4-b056-ea869bd675b7-kube-api-access-dschh\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.390921 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmbc6\" (UniqueName: \"kubernetes.io/projected/169a4bd0-220f-4da1-8182-debab448bd90-kube-api-access-gmbc6\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.390948 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-combined-ca-bundle\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 
22:12:03.391022 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9143cb8-ef34-42b4-b056-ea869bd675b7-logs\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.391080 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-dns-swift-storage-0\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.391106 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-ovsdbserver-nb\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.391185 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-ovsdbserver-sb\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.392856 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-ovsdbserver-sb\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.393336 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-config\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.393837 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-dns-swift-storage-0\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.394466 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-dns-svc\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.394966 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-ovsdbserver-nb\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.414935 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-gmbc6\" (UniqueName: \"kubernetes.io/projected/169a4bd0-220f-4da1-8182-debab448bd90-kube-api-access-gmbc6\") pod \"dnsmasq-dns-54c4dfcffc-hrqn4\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.471485 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.497082 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-config-data\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.498632 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-config-data-custom\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.498747 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dschh\" (UniqueName: \"kubernetes.io/projected/b9143cb8-ef34-42b4-b056-ea869bd675b7-kube-api-access-dschh\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.498810 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-combined-ca-bundle\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.498957 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9143cb8-ef34-42b4-b056-ea869bd675b7-logs\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.500148 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9143cb8-ef34-42b4-b056-ea869bd675b7-logs\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.509043 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-config-data-custom\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.510332 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-combined-ca-bundle\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc 
kubenswrapper[4910]: I0105 22:12:03.556269 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dschh\" (UniqueName: \"kubernetes.io/projected/b9143cb8-ef34-42b4-b056-ea869bd675b7-kube-api-access-dschh\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.556461 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-config-data\") pod \"barbican-api-69bd4bbbcd-dc6q7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.615640 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.675352 4910 generic.go:334] "Generic (PLEG): container finished" podID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerID="2e60f99295c06fb62dd6ce701a2c4469eb0953a491479b5e292cfd784ef83bab" exitCode=0 Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.675390 4910 generic.go:334] "Generic (PLEG): container finished" podID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerID="9db8d840f3a91c8db05c8717dfd0f5740fcebb85ae0ce45d04bc51eaa83dfc45" exitCode=2 Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.675415 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac","Type":"ContainerDied","Data":"2e60f99295c06fb62dd6ce701a2c4469eb0953a491479b5e292cfd784ef83bab"} Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.675449 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac","Type":"ContainerDied","Data":"9db8d840f3a91c8db05c8717dfd0f5740fcebb85ae0ce45d04bc51eaa83dfc45"} Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.929225 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-78b74ccb54-wvrcf"] Jan 05 22:12:03 crc kubenswrapper[4910]: I0105 22:12:03.950807 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-66897dc6c-9tqxs"] Jan 05 22:12:04 crc kubenswrapper[4910]: I0105 22:12:04.204252 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-hrqn4"] Jan 05 22:12:04 crc kubenswrapper[4910]: I0105 22:12:04.328566 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-69bd4bbbcd-dc6q7"] Jan 05 22:12:04 crc kubenswrapper[4910]: I0105 22:12:04.696608 4910 generic.go:334] "Generic (PLEG): container finished" podID="726618d0-e442-410e-87df-33bca2cf52a4" containerID="9e5bad872d1ceb46b26c5dc21dec8556316d8cada45123a1f5bd1c291685e9f4" exitCode=0 Jan 05 22:12:04 crc kubenswrapper[4910]: I0105 22:12:04.696702 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jn5f9" event={"ID":"726618d0-e442-410e-87df-33bca2cf52a4","Type":"ContainerDied","Data":"9e5bad872d1ceb46b26c5dc21dec8556316d8cada45123a1f5bd1c291685e9f4"} Jan 05 22:12:04 crc kubenswrapper[4910]: I0105 22:12:04.698923 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" 
event={"ID":"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b","Type":"ContainerStarted","Data":"bd26a1afce2e60c3a75aefb1c31b748fd2c1227ac679f43b7a472a5f1649119f"} Jan 05 22:12:04 crc kubenswrapper[4910]: I0105 22:12:04.700331 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" event={"ID":"b9143cb8-ef34-42b4-b056-ea869bd675b7","Type":"ContainerStarted","Data":"652f042e30d65efb5115b879a66a1912016f7403fe17dcb3b09a017b1ccbc6c0"} Jan 05 22:12:04 crc kubenswrapper[4910]: I0105 22:12:04.701977 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" event={"ID":"169a4bd0-220f-4da1-8182-debab448bd90","Type":"ContainerStarted","Data":"018750dd21127461c542ee5905e4dfa3e262a9027cede14322c26d885fa54395"} Jan 05 22:12:04 crc kubenswrapper[4910]: I0105 22:12:04.702002 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" event={"ID":"169a4bd0-220f-4da1-8182-debab448bd90","Type":"ContainerStarted","Data":"7c3245906b8a8230eb38a39af9155514aa2a795f1d9cf4d232987be1e9eb6954"} Jan 05 22:12:04 crc kubenswrapper[4910]: I0105 22:12:04.703405 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-66897dc6c-9tqxs" event={"ID":"ce8ea9ec-e799-457a-aaca-e16b591bdf0c","Type":"ContainerStarted","Data":"6dae96ce47f7edad278dc6b7a0b2d411166a07289e3cd626972b54862f560b7c"} Jan 05 22:12:05 crc kubenswrapper[4910]: I0105 22:12:05.716621 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" event={"ID":"b9143cb8-ef34-42b4-b056-ea869bd675b7","Type":"ContainerStarted","Data":"af446d955a631a963b7cdf7ecdd35907c80360de99632f3340cd2d4689dfc5cf"} Jan 05 22:12:05 crc kubenswrapper[4910]: I0105 22:12:05.716982 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" event={"ID":"b9143cb8-ef34-42b4-b056-ea869bd675b7","Type":"ContainerStarted","Data":"f1469fc930fac9bbb9f9a5fc7a9e0301d0114b166e3cae644d06b17f2cd866f2"} Jan 05 22:12:05 crc kubenswrapper[4910]: I0105 22:12:05.717000 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:05 crc kubenswrapper[4910]: I0105 22:12:05.717012 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:05 crc kubenswrapper[4910]: I0105 22:12:05.723536 4910 generic.go:334] "Generic (PLEG): container finished" podID="169a4bd0-220f-4da1-8182-debab448bd90" containerID="018750dd21127461c542ee5905e4dfa3e262a9027cede14322c26d885fa54395" exitCode=0 Jan 05 22:12:05 crc kubenswrapper[4910]: I0105 22:12:05.723975 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" event={"ID":"169a4bd0-220f-4da1-8182-debab448bd90","Type":"ContainerDied","Data":"018750dd21127461c542ee5905e4dfa3e262a9027cede14322c26d885fa54395"} Jan 05 22:12:05 crc kubenswrapper[4910]: I0105 22:12:05.746763 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" podStartSLOduration=2.746742507 podStartE2EDuration="2.746742507s" podCreationTimestamp="2026-01-05 22:12:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:12:05.743068165 +0000 UTC m=+1257.320565835" watchObservedRunningTime="2026-01-05 22:12:05.746742507 +0000 UTC m=+1257.324240177" Jan 05 
22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.317779 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.416942 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-scripts\") pod \"726618d0-e442-410e-87df-33bca2cf52a4\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.417071 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-combined-ca-bundle\") pod \"726618d0-e442-410e-87df-33bca2cf52a4\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.417105 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxjfv\" (UniqueName: \"kubernetes.io/projected/726618d0-e442-410e-87df-33bca2cf52a4-kube-api-access-fxjfv\") pod \"726618d0-e442-410e-87df-33bca2cf52a4\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.417198 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-db-sync-config-data\") pod \"726618d0-e442-410e-87df-33bca2cf52a4\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.417228 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-config-data\") pod \"726618d0-e442-410e-87df-33bca2cf52a4\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.417348 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/726618d0-e442-410e-87df-33bca2cf52a4-etc-machine-id\") pod \"726618d0-e442-410e-87df-33bca2cf52a4\" (UID: \"726618d0-e442-410e-87df-33bca2cf52a4\") " Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.417794 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/726618d0-e442-410e-87df-33bca2cf52a4-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "726618d0-e442-410e-87df-33bca2cf52a4" (UID: "726618d0-e442-410e-87df-33bca2cf52a4"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.426167 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-scripts" (OuterVolumeSpecName: "scripts") pod "726618d0-e442-410e-87df-33bca2cf52a4" (UID: "726618d0-e442-410e-87df-33bca2cf52a4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.430348 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/726618d0-e442-410e-87df-33bca2cf52a4-kube-api-access-fxjfv" (OuterVolumeSpecName: "kube-api-access-fxjfv") pod "726618d0-e442-410e-87df-33bca2cf52a4" (UID: "726618d0-e442-410e-87df-33bca2cf52a4"). 
InnerVolumeSpecName "kube-api-access-fxjfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.431562 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "726618d0-e442-410e-87df-33bca2cf52a4" (UID: "726618d0-e442-410e-87df-33bca2cf52a4"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.489572 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6bbbdf8dc6-s6tmf"] Jan 05 22:12:06 crc kubenswrapper[4910]: E0105 22:12:06.490042 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="726618d0-e442-410e-87df-33bca2cf52a4" containerName="cinder-db-sync" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.490057 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="726618d0-e442-410e-87df-33bca2cf52a4" containerName="cinder-db-sync" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.490326 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="726618d0-e442-410e-87df-33bca2cf52a4" containerName="cinder-db-sync" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.491543 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.495702 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.495918 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.511033 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "726618d0-e442-410e-87df-33bca2cf52a4" (UID: "726618d0-e442-410e-87df-33bca2cf52a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.518682 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-config-data" (OuterVolumeSpecName: "config-data") pod "726618d0-e442-410e-87df-33bca2cf52a4" (UID: "726618d0-e442-410e-87df-33bca2cf52a4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.521694 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-internal-tls-certs\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.521780 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-config-data-custom\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.521879 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45acd92f-2e5d-4fc1-8b91-c91f165e786a-logs\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.521973 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-combined-ca-bundle\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.522347 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-public-tls-certs\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.522438 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-config-data\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.522512 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzmmb\" (UniqueName: \"kubernetes.io/projected/45acd92f-2e5d-4fc1-8b91-c91f165e786a-kube-api-access-lzmmb\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.522737 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.522751 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxjfv\" (UniqueName: \"kubernetes.io/projected/726618d0-e442-410e-87df-33bca2cf52a4-kube-api-access-fxjfv\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.522767 4910 
reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.522777 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.522788 4910 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/726618d0-e442-410e-87df-33bca2cf52a4-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.522796 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/726618d0-e442-410e-87df-33bca2cf52a4-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.528627 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6bbbdf8dc6-s6tmf"] Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.624425 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-internal-tls-certs\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.624781 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-config-data-custom\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.624906 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45acd92f-2e5d-4fc1-8b91-c91f165e786a-logs\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.625419 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-combined-ca-bundle\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.625826 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-public-tls-certs\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.625950 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-config-data\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.626045 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzmmb\" (UniqueName: \"kubernetes.io/projected/45acd92f-2e5d-4fc1-8b91-c91f165e786a-kube-api-access-lzmmb\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.625367 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45acd92f-2e5d-4fc1-8b91-c91f165e786a-logs\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.628393 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-internal-tls-certs\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.631274 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-config-data-custom\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.632801 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-combined-ca-bundle\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.633327 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-public-tls-certs\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.645266 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzmmb\" (UniqueName: \"kubernetes.io/projected/45acd92f-2e5d-4fc1-8b91-c91f165e786a-kube-api-access-lzmmb\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.645693 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-config-data\") pod \"barbican-api-6bbbdf8dc6-s6tmf\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.767271 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jn5f9" event={"ID":"726618d0-e442-410e-87df-33bca2cf52a4","Type":"ContainerDied","Data":"ec5bba4875b3542eec5ac741aa889c5e5c96d1f306caf6e06c379c15ba2e7a5c"} Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.767324 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec5bba4875b3542eec5ac741aa889c5e5c96d1f306caf6e06c379c15ba2e7a5c" Jan 05 
22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.767409 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-jn5f9" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.783312 4910 generic.go:334] "Generic (PLEG): container finished" podID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerID="be213fc6c3c4cd181483e6f8d5f930cbfb3f3f94dff00ab7b6b2c04364f113bb" exitCode=0 Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.783709 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac","Type":"ContainerDied","Data":"be213fc6c3c4cd181483e6f8d5f930cbfb3f3f94dff00ab7b6b2c04364f113bb"} Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.835522 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:06 crc kubenswrapper[4910]: I0105 22:12:06.850778 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.007248 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 22:12:07 crc kubenswrapper[4910]: E0105 22:12:07.007608 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerName="sg-core" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.007626 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerName="sg-core" Jan 05 22:12:07 crc kubenswrapper[4910]: E0105 22:12:07.007642 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerName="ceilometer-notification-agent" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.007665 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerName="ceilometer-notification-agent" Jan 05 22:12:07 crc kubenswrapper[4910]: E0105 22:12:07.007685 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerName="proxy-httpd" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.007692 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerName="proxy-httpd" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.007859 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerName="sg-core" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.007883 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerName="ceilometer-notification-agent" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.007899 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" containerName="proxy-httpd" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.008761 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.012881 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.020167 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-zqtsn" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.020454 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.020496 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.032390 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-sg-core-conf-yaml\") pod \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.032442 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-combined-ca-bundle\") pod \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.032514 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-scripts\") pod \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.032595 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-run-httpd\") pod \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.032615 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9wxw\" (UniqueName: \"kubernetes.io/projected/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-kube-api-access-z9wxw\") pod \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.032663 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-config-data\") pod \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.032715 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-log-httpd\") pod \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\" (UID: \"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac\") " Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.033882 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" (UID: "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.034383 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" (UID: "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.035667 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.047133 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-kube-api-access-z9wxw" (OuterVolumeSpecName: "kube-api-access-z9wxw") pod "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" (UID: "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac"). InnerVolumeSpecName "kube-api-access-z9wxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.088011 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-scripts" (OuterVolumeSpecName: "scripts") pod "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" (UID: "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.099604 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-hrqn4"] Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.135777 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-scripts\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.135863 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdgng\" (UniqueName: \"kubernetes.io/projected/6e9522ec-d5e3-484e-ac80-334021493bb9-kube-api-access-hdgng\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.135920 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-config-data\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.135957 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6e9522ec-d5e3-484e-ac80-334021493bb9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.135984 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: 
\"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.136026 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.136151 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.136168 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.136179 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9wxw\" (UniqueName: \"kubernetes.io/projected/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-kube-api-access-z9wxw\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.136190 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.157816 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-tddh2"] Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.159549 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" (UID: "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.160243 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.269785 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-tddh2"] Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.307348 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6e9522ec-d5e3-484e-ac80-334021493bb9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.307506 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.307622 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.307813 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-scripts\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.307905 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdgng\" (UniqueName: \"kubernetes.io/projected/6e9522ec-d5e3-484e-ac80-334021493bb9-kube-api-access-hdgng\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.308013 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-config-data\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.308077 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.310007 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" (UID: "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.310150 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6e9522ec-d5e3-484e-ac80-334021493bb9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.346562 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdgng\" (UniqueName: \"kubernetes.io/projected/6e9522ec-d5e3-484e-ac80-334021493bb9-kube-api-access-hdgng\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.367157 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-scripts\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.369760 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-config-data\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.372614 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.383401 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.411255 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-config-data" (OuterVolumeSpecName: "config-data") pod "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" (UID: "c38271dc-8b9a-4bb0-a8bc-2fc78c641aac"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.411282 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5tqq\" (UniqueName: \"kubernetes.io/projected/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-kube-api-access-g5tqq\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.411485 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-ovsdbserver-nb\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.411582 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-ovsdbserver-sb\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.411649 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-config\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.411710 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-dns-svc\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.411787 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-dns-swift-storage-0\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.411890 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.411905 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.414051 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.416528 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.422485 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.435988 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.513793 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5tqq\" (UniqueName: \"kubernetes.io/projected/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-kube-api-access-g5tqq\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.513901 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-ovsdbserver-nb\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.514699 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-ovsdbserver-sb\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.514793 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-config\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.514900 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-dns-svc\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.514952 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-dns-swift-storage-0\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.514952 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-ovsdbserver-nb\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.516720 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-dns-swift-storage-0\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.517687 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-config\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.517923 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-dns-svc\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.519047 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-ovsdbserver-sb\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.537266 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5tqq\" (UniqueName: \"kubernetes.io/projected/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-kube-api-access-g5tqq\") pod \"dnsmasq-dns-6b4f5fc4f-tddh2\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.585710 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6bbbdf8dc6-s6tmf"] Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.621248 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzq7x\" (UniqueName: \"kubernetes.io/projected/964fd38e-23ed-4b80-864d-dc35db8496c2-kube-api-access-gzq7x\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.621337 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-scripts\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.621474 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-config-data-custom\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.621507 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-config-data\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.621671 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/964fd38e-23ed-4b80-864d-dc35db8496c2-logs\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.621714 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/964fd38e-23ed-4b80-864d-dc35db8496c2-etc-machine-id\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.621747 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.660493 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.723493 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzq7x\" (UniqueName: \"kubernetes.io/projected/964fd38e-23ed-4b80-864d-dc35db8496c2-kube-api-access-gzq7x\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.723565 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-scripts\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.723601 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-config-data-custom\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.723623 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-config-data\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.723679 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/964fd38e-23ed-4b80-864d-dc35db8496c2-logs\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.723702 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/964fd38e-23ed-4b80-864d-dc35db8496c2-etc-machine-id\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.723723 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.724152 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/964fd38e-23ed-4b80-864d-dc35db8496c2-etc-machine-id\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.724525 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/964fd38e-23ed-4b80-864d-dc35db8496c2-logs\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.728220 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-config-data-custom\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.729441 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-config-data\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.730735 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.730854 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-scripts\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.744326 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.749713 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzq7x\" (UniqueName: \"kubernetes.io/projected/964fd38e-23ed-4b80-864d-dc35db8496c2-kube-api-access-gzq7x\") pod \"cinder-api-0\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.779239 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.801204 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" event={"ID":"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b","Type":"ContainerStarted","Data":"66adaa6dc30ca0eb6df8fdbc29cb135171d3e16efce93331526042109780467b"} Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.801265 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" event={"ID":"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b","Type":"ContainerStarted","Data":"71aa4a23693de64aaa8cbbd15881cbffefc0342e211e96812e957ba634cedcdb"} Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.809467 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" event={"ID":"45acd92f-2e5d-4fc1-8b91-c91f165e786a","Type":"ContainerStarted","Data":"6104667b5ae1cdcd47a597709123b12716141db09f9b433cb838f5a9fceaa70c"} Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.809553 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" event={"ID":"45acd92f-2e5d-4fc1-8b91-c91f165e786a","Type":"ContainerStarted","Data":"ce24c89d86326d45b19f0ac31614562d1b1568b9c252798eff2b4b44a1749993"} Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.817763 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c38271dc-8b9a-4bb0-a8bc-2fc78c641aac","Type":"ContainerDied","Data":"c16d0255b97b18005fd07da2c3219c413590ef57cd5dda64717961109d853194"} Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.817831 4910 scope.go:117] "RemoveContainer" containerID="2e60f99295c06fb62dd6ce701a2c4469eb0953a491479b5e292cfd784ef83bab" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.818833 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.824416 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" podStartSLOduration=3.485338381 podStartE2EDuration="5.824390966s" podCreationTimestamp="2026-01-05 22:12:02 +0000 UTC" firstStartedPulling="2026-01-05 22:12:03.950091934 +0000 UTC m=+1255.527589604" lastFinishedPulling="2026-01-05 22:12:06.289144519 +0000 UTC m=+1257.866642189" observedRunningTime="2026-01-05 22:12:07.821429402 +0000 UTC m=+1259.398927092" watchObservedRunningTime="2026-01-05 22:12:07.824390966 +0000 UTC m=+1259.401888636" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.839147 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" event={"ID":"169a4bd0-220f-4da1-8182-debab448bd90","Type":"ContainerStarted","Data":"4dcad8d1eb773fb94ca533f7df9a6c7bb896c9403f55ad805c90309255be94d8"} Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.840703 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.849707 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-66897dc6c-9tqxs" event={"ID":"ce8ea9ec-e799-457a-aaca-e16b591bdf0c","Type":"ContainerStarted","Data":"dd977da3f8e7fc9fff03a9de2e1898d7cae116843deeda14da1e479c7ce300a4"} Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.849762 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-66897dc6c-9tqxs" event={"ID":"ce8ea9ec-e799-457a-aaca-e16b591bdf0c","Type":"ContainerStarted","Data":"b200e9f40ae5b0a34ae3718175edc6e00f0e7819999c5ddcf7777af1ffb93d24"} Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.872065 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" podStartSLOduration=4.872040739 podStartE2EDuration="4.872040739s" podCreationTimestamp="2026-01-05 22:12:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:12:07.865452834 +0000 UTC m=+1259.442950504" watchObservedRunningTime="2026-01-05 22:12:07.872040739 +0000 UTC m=+1259.449538409" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.929437 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-66897dc6c-9tqxs" podStartSLOduration=3.608322261 podStartE2EDuration="5.929409226s" podCreationTimestamp="2026-01-05 22:12:02 +0000 UTC" firstStartedPulling="2026-01-05 22:12:03.969918801 +0000 UTC m=+1255.547416471" lastFinishedPulling="2026-01-05 22:12:06.291005766 +0000 UTC m=+1257.868503436" observedRunningTime="2026-01-05 22:12:07.892271796 +0000 UTC m=+1259.469769486" watchObservedRunningTime="2026-01-05 22:12:07.929409226 +0000 UTC m=+1259.506906896" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.946110 4910 scope.go:117] "RemoveContainer" containerID="9db8d840f3a91c8db05c8717dfd0f5740fcebb85ae0ce45d04bc51eaa83dfc45" Jan 05 22:12:07 crc kubenswrapper[4910]: I0105 22:12:07.994360 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.016635 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 
22:12:08.024187 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.027327 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.035690 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.044021 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.045961 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.072951 4910 scope.go:117] "RemoveContainer" containerID="be213fc6c3c4cd181483e6f8d5f930cbfb3f3f94dff00ab7b6b2c04364f113bb" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.141393 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-scripts\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.141804 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.141832 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prgfr\" (UniqueName: \"kubernetes.io/projected/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-kube-api-access-prgfr\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.141894 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-log-httpd\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.141959 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-config-data\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.142029 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.142080 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-run-httpd\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc 
kubenswrapper[4910]: I0105 22:12:08.161410 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.243237 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-scripts\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.243284 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.243306 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prgfr\" (UniqueName: \"kubernetes.io/projected/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-kube-api-access-prgfr\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.243352 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-log-httpd\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.243393 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-config-data\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.243435 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.243468 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-run-httpd\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.244450 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-run-httpd\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.244459 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-log-httpd\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.251491 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-config-data\") pod \"ceilometer-0\" (UID: 
\"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.252391 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.253995 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.276386 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-scripts\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.279145 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prgfr\" (UniqueName: \"kubernetes.io/projected/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-kube-api-access-prgfr\") pod \"ceilometer-0\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.373830 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.464461 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-tddh2"] Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.570466 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.750333 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c38271dc-8b9a-4bb0-a8bc-2fc78c641aac" path="/var/lib/kubelet/pods/c38271dc-8b9a-4bb0-a8bc-2fc78c641aac/volumes" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.907446 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6e9522ec-d5e3-484e-ac80-334021493bb9","Type":"ContainerStarted","Data":"9dd02f25375004abe1c1d22368fb50e08b4ade3dc4d18bacb13b5d4853ea31b1"} Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.935574 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" event={"ID":"45acd92f-2e5d-4fc1-8b91-c91f165e786a","Type":"ContainerStarted","Data":"a005751f16bf05306ffd138b7900c870797084700111340ccf797cab547f6f2e"} Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.936770 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.936804 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.959386 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.963770 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" 
event={"ID":"649fef5f-2881-49d4-8bf4-1cf8e93a87b3","Type":"ContainerStarted","Data":"3bd0be7e791fc8e3a16907f1479cb022653ddc9358e780336f8ca8f0a5fa95d5"} Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.982790 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" podStartSLOduration=2.982764825 podStartE2EDuration="2.982764825s" podCreationTimestamp="2026-01-05 22:12:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:12:08.965865011 +0000 UTC m=+1260.543362681" watchObservedRunningTime="2026-01-05 22:12:08.982764825 +0000 UTC m=+1260.560262495" Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.984660 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" podUID="169a4bd0-220f-4da1-8182-debab448bd90" containerName="dnsmasq-dns" containerID="cri-o://4dcad8d1eb773fb94ca533f7df9a6c7bb896c9403f55ad805c90309255be94d8" gracePeriod=10 Jan 05 22:12:08 crc kubenswrapper[4910]: I0105 22:12:08.984828 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"964fd38e-23ed-4b80-864d-dc35db8496c2","Type":"ContainerStarted","Data":"6b88f7d6b6d78be0b44ef120cd384537a5a7a319b80c812d43dfbe00c709766d"} Jan 05 22:12:09 crc kubenswrapper[4910]: I0105 22:12:09.716210 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:09 crc kubenswrapper[4910]: I0105 22:12:09.921167 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-ovsdbserver-sb\") pod \"169a4bd0-220f-4da1-8182-debab448bd90\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " Jan 05 22:12:09 crc kubenswrapper[4910]: I0105 22:12:09.922564 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-dns-svc\") pod \"169a4bd0-220f-4da1-8182-debab448bd90\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " Jan 05 22:12:09 crc kubenswrapper[4910]: I0105 22:12:09.922612 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-dns-swift-storage-0\") pod \"169a4bd0-220f-4da1-8182-debab448bd90\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " Jan 05 22:12:09 crc kubenswrapper[4910]: I0105 22:12:09.922678 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmbc6\" (UniqueName: \"kubernetes.io/projected/169a4bd0-220f-4da1-8182-debab448bd90-kube-api-access-gmbc6\") pod \"169a4bd0-220f-4da1-8182-debab448bd90\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " Jan 05 22:12:09 crc kubenswrapper[4910]: I0105 22:12:09.922707 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-config\") pod \"169a4bd0-220f-4da1-8182-debab448bd90\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " Jan 05 22:12:09 crc kubenswrapper[4910]: I0105 22:12:09.922756 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-ovsdbserver-nb\") pod \"169a4bd0-220f-4da1-8182-debab448bd90\" (UID: \"169a4bd0-220f-4da1-8182-debab448bd90\") " Jan 05 22:12:09 crc kubenswrapper[4910]: I0105 22:12:09.931184 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/169a4bd0-220f-4da1-8182-debab448bd90-kube-api-access-gmbc6" (OuterVolumeSpecName: "kube-api-access-gmbc6") pod "169a4bd0-220f-4da1-8182-debab448bd90" (UID: "169a4bd0-220f-4da1-8182-debab448bd90"). InnerVolumeSpecName "kube-api-access-gmbc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.014581 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "169a4bd0-220f-4da1-8182-debab448bd90" (UID: "169a4bd0-220f-4da1-8182-debab448bd90"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.025191 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.025429 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmbc6\" (UniqueName: \"kubernetes.io/projected/169a4bd0-220f-4da1-8182-debab448bd90-kube-api-access-gmbc6\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.029954 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d949f571-b2a3-43ba-8f4a-65d0a3eed74a","Type":"ContainerStarted","Data":"c60c8411f9c2399b8bae52ff71314c32f56a5a07355eed4bdebfb3ace1d5b675"} Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.030005 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d949f571-b2a3-43ba-8f4a-65d0a3eed74a","Type":"ContainerStarted","Data":"61d5311da9ff4bf9e3bd08ed3628a5a67dbab1eafcb0e9c7c54d1d48198e5420"} Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.032460 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-config" (OuterVolumeSpecName: "config") pod "169a4bd0-220f-4da1-8182-debab448bd90" (UID: "169a4bd0-220f-4da1-8182-debab448bd90"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.033148 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "169a4bd0-220f-4da1-8182-debab448bd90" (UID: "169a4bd0-220f-4da1-8182-debab448bd90"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.033842 4910 generic.go:334] "Generic (PLEG): container finished" podID="649fef5f-2881-49d4-8bf4-1cf8e93a87b3" containerID="ca55847c5a90177cacabbb3c972e4287be7959ac0ac8664ef9f61c1b46b3dc56" exitCode=0 Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.033930 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" event={"ID":"649fef5f-2881-49d4-8bf4-1cf8e93a87b3","Type":"ContainerDied","Data":"ca55847c5a90177cacabbb3c972e4287be7959ac0ac8664ef9f61c1b46b3dc56"} Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.036860 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "169a4bd0-220f-4da1-8182-debab448bd90" (UID: "169a4bd0-220f-4da1-8182-debab448bd90"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.047762 4910 generic.go:334] "Generic (PLEG): container finished" podID="169a4bd0-220f-4da1-8182-debab448bd90" containerID="4dcad8d1eb773fb94ca533f7df9a6c7bb896c9403f55ad805c90309255be94d8" exitCode=0 Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.047851 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.047861 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" event={"ID":"169a4bd0-220f-4da1-8182-debab448bd90","Type":"ContainerDied","Data":"4dcad8d1eb773fb94ca533f7df9a6c7bb896c9403f55ad805c90309255be94d8"} Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.047893 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-hrqn4" event={"ID":"169a4bd0-220f-4da1-8182-debab448bd90","Type":"ContainerDied","Data":"7c3245906b8a8230eb38a39af9155514aa2a795f1d9cf4d232987be1e9eb6954"} Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.047916 4910 scope.go:117] "RemoveContainer" containerID="4dcad8d1eb773fb94ca533f7df9a6c7bb896c9403f55ad805c90309255be94d8" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.057225 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"964fd38e-23ed-4b80-864d-dc35db8496c2","Type":"ContainerStarted","Data":"fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd"} Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.059517 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "169a4bd0-220f-4da1-8182-debab448bd90" (UID: "169a4bd0-220f-4da1-8182-debab448bd90"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.084679 4910 scope.go:117] "RemoveContainer" containerID="018750dd21127461c542ee5905e4dfa3e262a9027cede14322c26d885fa54395" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.127769 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.127799 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.127810 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.127819 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/169a4bd0-220f-4da1-8182-debab448bd90-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.154034 4910 scope.go:117] "RemoveContainer" containerID="4dcad8d1eb773fb94ca533f7df9a6c7bb896c9403f55ad805c90309255be94d8" Jan 05 22:12:10 crc kubenswrapper[4910]: E0105 22:12:10.154563 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4dcad8d1eb773fb94ca533f7df9a6c7bb896c9403f55ad805c90309255be94d8\": container with ID starting with 4dcad8d1eb773fb94ca533f7df9a6c7bb896c9403f55ad805c90309255be94d8 not found: ID does not exist" containerID="4dcad8d1eb773fb94ca533f7df9a6c7bb896c9403f55ad805c90309255be94d8" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.154608 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4dcad8d1eb773fb94ca533f7df9a6c7bb896c9403f55ad805c90309255be94d8"} err="failed to get container status \"4dcad8d1eb773fb94ca533f7df9a6c7bb896c9403f55ad805c90309255be94d8\": rpc error: code = NotFound desc = could not find container \"4dcad8d1eb773fb94ca533f7df9a6c7bb896c9403f55ad805c90309255be94d8\": container with ID starting with 4dcad8d1eb773fb94ca533f7df9a6c7bb896c9403f55ad805c90309255be94d8 not found: ID does not exist" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.154635 4910 scope.go:117] "RemoveContainer" containerID="018750dd21127461c542ee5905e4dfa3e262a9027cede14322c26d885fa54395" Jan 05 22:12:10 crc kubenswrapper[4910]: E0105 22:12:10.154895 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"018750dd21127461c542ee5905e4dfa3e262a9027cede14322c26d885fa54395\": container with ID starting with 018750dd21127461c542ee5905e4dfa3e262a9027cede14322c26d885fa54395 not found: ID does not exist" containerID="018750dd21127461c542ee5905e4dfa3e262a9027cede14322c26d885fa54395" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.154918 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"018750dd21127461c542ee5905e4dfa3e262a9027cede14322c26d885fa54395"} err="failed to get container status \"018750dd21127461c542ee5905e4dfa3e262a9027cede14322c26d885fa54395\": rpc error: code = NotFound desc = could not find container 
\"018750dd21127461c542ee5905e4dfa3e262a9027cede14322c26d885fa54395\": container with ID starting with 018750dd21127461c542ee5905e4dfa3e262a9027cede14322c26d885fa54395 not found: ID does not exist" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.398675 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-hrqn4"] Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.407090 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-hrqn4"] Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.581071 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.737955 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="169a4bd0-220f-4da1-8182-debab448bd90" path="/var/lib/kubelet/pods/169a4bd0-220f-4da1-8182-debab448bd90/volumes" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.952142 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.953465 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.953589 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.954380 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"657357707be4d8c777ee71d089740dbf0952f7ef5dc120116497297a0abbc7b5"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 22:12:10 crc kubenswrapper[4910]: I0105 22:12:10.954515 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://657357707be4d8c777ee71d089740dbf0952f7ef5dc120116497297a0abbc7b5" gracePeriod=600 Jan 05 22:12:11 crc kubenswrapper[4910]: I0105 22:12:11.103640 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"964fd38e-23ed-4b80-864d-dc35db8496c2","Type":"ContainerStarted","Data":"72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86"} Jan 05 22:12:11 crc kubenswrapper[4910]: I0105 22:12:11.103768 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 05 22:12:11 crc kubenswrapper[4910]: I0105 22:12:11.104080 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="964fd38e-23ed-4b80-864d-dc35db8496c2" containerName="cinder-api-log" containerID="cri-o://fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd" gracePeriod=30 Jan 05 22:12:11 crc kubenswrapper[4910]: 
I0105 22:12:11.104097 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="964fd38e-23ed-4b80-864d-dc35db8496c2" containerName="cinder-api" containerID="cri-o://72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86" gracePeriod=30 Jan 05 22:12:11 crc kubenswrapper[4910]: I0105 22:12:11.108729 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6e9522ec-d5e3-484e-ac80-334021493bb9","Type":"ContainerStarted","Data":"ae0e4f6f3d169570f87594f0c3977324407fc123cc128d0b893f011641a0e1ca"} Jan 05 22:12:11 crc kubenswrapper[4910]: I0105 22:12:11.112991 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d949f571-b2a3-43ba-8f4a-65d0a3eed74a","Type":"ContainerStarted","Data":"c9da7326419acb23c844811323e67f423734a62690a3263e5e16ff1268ccc0c4"} Jan 05 22:12:11 crc kubenswrapper[4910]: I0105 22:12:11.117756 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" event={"ID":"649fef5f-2881-49d4-8bf4-1cf8e93a87b3","Type":"ContainerStarted","Data":"35bc838138999a85ba33aa625ee381aff61ed72cb60e68c0f63973fd33ed2d44"} Jan 05 22:12:11 crc kubenswrapper[4910]: I0105 22:12:11.118081 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:11 crc kubenswrapper[4910]: I0105 22:12:11.136151 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.13613135 podStartE2EDuration="4.13613135s" podCreationTimestamp="2026-01-05 22:12:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:12:11.130264823 +0000 UTC m=+1262.707762523" watchObservedRunningTime="2026-01-05 22:12:11.13613135 +0000 UTC m=+1262.713629020" Jan 05 22:12:11 crc kubenswrapper[4910]: I0105 22:12:11.163520 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" podStartSLOduration=4.163497825 podStartE2EDuration="4.163497825s" podCreationTimestamp="2026-01-05 22:12:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:12:11.155590687 +0000 UTC m=+1262.733088357" watchObservedRunningTime="2026-01-05 22:12:11.163497825 +0000 UTC m=+1262.740995485" Jan 05 22:12:11 crc kubenswrapper[4910]: I0105 22:12:11.987400 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.130753 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6e9522ec-d5e3-484e-ac80-334021493bb9","Type":"ContainerStarted","Data":"315a5f9c520303843cc8ccecd702c7bbdbefb5eff71d5e8495a1344958c30556"} Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.133294 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d949f571-b2a3-43ba-8f4a-65d0a3eed74a","Type":"ContainerStarted","Data":"92a0da346a6c3e2e2491fb688bd2124b3883a1389df5d37d28a3a0825bd343cf"} Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.135589 4910 generic.go:334] "Generic (PLEG): container finished" podID="964fd38e-23ed-4b80-864d-dc35db8496c2" containerID="72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86" exitCode=0 Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.135620 4910 generic.go:334] "Generic (PLEG): container finished" podID="964fd38e-23ed-4b80-864d-dc35db8496c2" containerID="fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd" exitCode=143 Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.135664 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.135681 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"964fd38e-23ed-4b80-864d-dc35db8496c2","Type":"ContainerDied","Data":"72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86"} Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.135711 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"964fd38e-23ed-4b80-864d-dc35db8496c2","Type":"ContainerDied","Data":"fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd"} Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.135725 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"964fd38e-23ed-4b80-864d-dc35db8496c2","Type":"ContainerDied","Data":"6b88f7d6b6d78be0b44ef120cd384537a5a7a319b80c812d43dfbe00c709766d"} Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.135746 4910 scope.go:117] "RemoveContainer" containerID="72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.139156 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="657357707be4d8c777ee71d089740dbf0952f7ef5dc120116497297a0abbc7b5" exitCode=0 Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.139226 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"657357707be4d8c777ee71d089740dbf0952f7ef5dc120116497297a0abbc7b5"} Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.139276 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"3c994ce088089ca2a9dc19bf92bc43649f3bc30178471fa64d55a2db65d9d2ab"} Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.153258 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.915090866 
podStartE2EDuration="6.153237321s" podCreationTimestamp="2026-01-05 22:12:06 +0000 UTC" firstStartedPulling="2026-01-05 22:12:08.176141875 +0000 UTC m=+1259.753639545" lastFinishedPulling="2026-01-05 22:12:09.41428833 +0000 UTC m=+1260.991786000" observedRunningTime="2026-01-05 22:12:12.14960687 +0000 UTC m=+1263.727104550" watchObservedRunningTime="2026-01-05 22:12:12.153237321 +0000 UTC m=+1263.730734991" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.165748 4910 scope.go:117] "RemoveContainer" containerID="fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.169429 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-config-data-custom\") pod \"964fd38e-23ed-4b80-864d-dc35db8496c2\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.169495 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-config-data\") pod \"964fd38e-23ed-4b80-864d-dc35db8496c2\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.169667 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/964fd38e-23ed-4b80-864d-dc35db8496c2-logs\") pod \"964fd38e-23ed-4b80-864d-dc35db8496c2\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.169707 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-scripts\") pod \"964fd38e-23ed-4b80-864d-dc35db8496c2\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.169752 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzq7x\" (UniqueName: \"kubernetes.io/projected/964fd38e-23ed-4b80-864d-dc35db8496c2-kube-api-access-gzq7x\") pod \"964fd38e-23ed-4b80-864d-dc35db8496c2\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.169838 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-combined-ca-bundle\") pod \"964fd38e-23ed-4b80-864d-dc35db8496c2\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.169865 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/964fd38e-23ed-4b80-864d-dc35db8496c2-etc-machine-id\") pod \"964fd38e-23ed-4b80-864d-dc35db8496c2\" (UID: \"964fd38e-23ed-4b80-864d-dc35db8496c2\") " Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.170707 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/964fd38e-23ed-4b80-864d-dc35db8496c2-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "964fd38e-23ed-4b80-864d-dc35db8496c2" (UID: "964fd38e-23ed-4b80-864d-dc35db8496c2"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.172536 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/964fd38e-23ed-4b80-864d-dc35db8496c2-logs" (OuterVolumeSpecName: "logs") pod "964fd38e-23ed-4b80-864d-dc35db8496c2" (UID: "964fd38e-23ed-4b80-864d-dc35db8496c2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.177028 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "964fd38e-23ed-4b80-864d-dc35db8496c2" (UID: "964fd38e-23ed-4b80-864d-dc35db8496c2"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.177217 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/964fd38e-23ed-4b80-864d-dc35db8496c2-kube-api-access-gzq7x" (OuterVolumeSpecName: "kube-api-access-gzq7x") pod "964fd38e-23ed-4b80-864d-dc35db8496c2" (UID: "964fd38e-23ed-4b80-864d-dc35db8496c2"). InnerVolumeSpecName "kube-api-access-gzq7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.183663 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-scripts" (OuterVolumeSpecName: "scripts") pod "964fd38e-23ed-4b80-864d-dc35db8496c2" (UID: "964fd38e-23ed-4b80-864d-dc35db8496c2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.219869 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "964fd38e-23ed-4b80-864d-dc35db8496c2" (UID: "964fd38e-23ed-4b80-864d-dc35db8496c2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.252451 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-config-data" (OuterVolumeSpecName: "config-data") pod "964fd38e-23ed-4b80-864d-dc35db8496c2" (UID: "964fd38e-23ed-4b80-864d-dc35db8496c2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.272702 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/964fd38e-23ed-4b80-864d-dc35db8496c2-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.272736 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.272748 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzq7x\" (UniqueName: \"kubernetes.io/projected/964fd38e-23ed-4b80-864d-dc35db8496c2-kube-api-access-gzq7x\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.272761 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.272772 4910 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/964fd38e-23ed-4b80-864d-dc35db8496c2-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.272958 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.273682 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/964fd38e-23ed-4b80-864d-dc35db8496c2-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.331151 4910 scope.go:117] "RemoveContainer" containerID="72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86" Jan 05 22:12:12 crc kubenswrapper[4910]: E0105 22:12:12.331856 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86\": container with ID starting with 72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86 not found: ID does not exist" containerID="72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.331928 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86"} err="failed to get container status \"72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86\": rpc error: code = NotFound desc = could not find container \"72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86\": container with ID starting with 72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86 not found: ID does not exist" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.331970 4910 scope.go:117] "RemoveContainer" containerID="fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd" Jan 05 22:12:12 crc kubenswrapper[4910]: E0105 22:12:12.332571 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd\": container with ID starting with fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd not found: ID does not exist" containerID="fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.332608 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd"} err="failed to get container status \"fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd\": rpc error: code = NotFound desc = could not find container \"fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd\": container with ID starting with fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd not found: ID does not exist" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.332636 4910 scope.go:117] "RemoveContainer" containerID="72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.332950 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86"} err="failed to get container status \"72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86\": rpc error: code = NotFound desc = could not find container \"72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86\": container with ID starting with 72036ec4c6fb62fb9c56f3e0e0168b334a66e127fd98f9557ebccee565d9ed86 not found: ID does not exist" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.332968 4910 scope.go:117] "RemoveContainer" containerID="fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.333240 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd"} err="failed to get container status \"fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd\": rpc error: code = NotFound desc = could not find container \"fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd\": container with ID starting with fcd53137ddb69f96163c96a34045170aeb669d02fcd88cdada78240f77dab0bd not found: ID does not exist" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.333266 4910 scope.go:117] "RemoveContainer" containerID="a3fde00ac3c0f56cd1cc5b71d4cb8772dfa8207e8240fa1964337d79bc9075bf" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.481163 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.495936 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.504260 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 05 22:12:12 crc kubenswrapper[4910]: E0105 22:12:12.504691 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="964fd38e-23ed-4b80-864d-dc35db8496c2" containerName="cinder-api" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.504713 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="964fd38e-23ed-4b80-864d-dc35db8496c2" containerName="cinder-api" Jan 05 22:12:12 crc kubenswrapper[4910]: E0105 22:12:12.504743 4910 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="169a4bd0-220f-4da1-8182-debab448bd90" containerName="dnsmasq-dns" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.504750 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="169a4bd0-220f-4da1-8182-debab448bd90" containerName="dnsmasq-dns" Jan 05 22:12:12 crc kubenswrapper[4910]: E0105 22:12:12.504764 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="964fd38e-23ed-4b80-864d-dc35db8496c2" containerName="cinder-api-log" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.504770 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="964fd38e-23ed-4b80-864d-dc35db8496c2" containerName="cinder-api-log" Jan 05 22:12:12 crc kubenswrapper[4910]: E0105 22:12:12.504780 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="169a4bd0-220f-4da1-8182-debab448bd90" containerName="init" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.504786 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="169a4bd0-220f-4da1-8182-debab448bd90" containerName="init" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.504937 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="169a4bd0-220f-4da1-8182-debab448bd90" containerName="dnsmasq-dns" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.504955 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="964fd38e-23ed-4b80-864d-dc35db8496c2" containerName="cinder-api" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.504965 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="964fd38e-23ed-4b80-864d-dc35db8496c2" containerName="cinder-api-log" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.507407 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.509800 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.509968 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.510085 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.516419 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.661351 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.681780 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.681858 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-public-tls-certs\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.681888 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-config-data\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.681907 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-config-data-custom\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.681928 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9p6gw\" (UniqueName: \"kubernetes.io/projected/07efd759-c536-425d-938e-a8ccd41706cd-kube-api-access-9p6gw\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.681965 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.681980 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-scripts\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.681999 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07efd759-c536-425d-938e-a8ccd41706cd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.682053 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07efd759-c536-425d-938e-a8ccd41706cd-logs\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.733168 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="964fd38e-23ed-4b80-864d-dc35db8496c2" path="/var/lib/kubelet/pods/964fd38e-23ed-4b80-864d-dc35db8496c2/volumes" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.784136 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-public-tls-certs\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.784209 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-config-data\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.784246 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-config-data-custom\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.784323 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9p6gw\" (UniqueName: \"kubernetes.io/projected/07efd759-c536-425d-938e-a8ccd41706cd-kube-api-access-9p6gw\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.784358 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.784381 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-scripts\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.784409 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07efd759-c536-425d-938e-a8ccd41706cd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.784483 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07efd759-c536-425d-938e-a8ccd41706cd-logs\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.784554 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.784580 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07efd759-c536-425d-938e-a8ccd41706cd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.785059 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07efd759-c536-425d-938e-a8ccd41706cd-logs\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.788248 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.789665 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-public-tls-certs\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.790109 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-config-data-custom\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.790975 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-config-data\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.792586 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-scripts\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.794717 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.806930 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9p6gw\" (UniqueName: \"kubernetes.io/projected/07efd759-c536-425d-938e-a8ccd41706cd-kube-api-access-9p6gw\") pod \"cinder-api-0\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.860745 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 05 22:12:12 crc kubenswrapper[4910]: I0105 22:12:12.880721 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:12:13 crc kubenswrapper[4910]: I0105 22:12:13.161651 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d949f571-b2a3-43ba-8f4a-65d0a3eed74a","Type":"ContainerStarted","Data":"b5949571b11635d2d0c6aa2c54638023c2e8fa9a2a1bc6caa4e781c517195041"} Jan 05 22:12:13 crc kubenswrapper[4910]: I0105 22:12:13.162357 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 05 22:12:13 crc kubenswrapper[4910]: I0105 22:12:13.216942 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.545082226 podStartE2EDuration="6.216916947s" podCreationTimestamp="2026-01-05 22:12:07 +0000 UTC" firstStartedPulling="2026-01-05 22:12:08.977736199 +0000 UTC m=+1260.555233869" lastFinishedPulling="2026-01-05 22:12:12.64957092 +0000 UTC m=+1264.227068590" observedRunningTime="2026-01-05 22:12:13.199354697 +0000 UTC m=+1264.776852367" watchObservedRunningTime="2026-01-05 22:12:13.216916947 +0000 UTC m=+1264.794414617" Jan 05 22:12:13 crc kubenswrapper[4910]: I0105 22:12:13.481442 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 05 22:12:14 crc kubenswrapper[4910]: I0105 22:12:14.176862 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"07efd759-c536-425d-938e-a8ccd41706cd","Type":"ContainerStarted","Data":"6daa2eb7900c845da95b4889f00144bf520b49eeafeeefc6d62129f8760b3df1"} Jan 05 22:12:14 crc kubenswrapper[4910]: I0105 22:12:14.177519 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"07efd759-c536-425d-938e-a8ccd41706cd","Type":"ContainerStarted","Data":"df4ee361dfedceaf2c81953233c13b2456b83fc8b0f6d991dc2731dd08887cbf"} Jan 05 22:12:15 crc kubenswrapper[4910]: I0105 22:12:15.173675 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:15 crc kubenswrapper[4910]: I0105 22:12:15.197633 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"07efd759-c536-425d-938e-a8ccd41706cd","Type":"ContainerStarted","Data":"a0e248b48425380302b1988bb335f1102fb9d344cce326d7af9e5dd2f6475bc5"} Jan 05 22:12:15 crc kubenswrapper[4910]: I0105 22:12:15.197701 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 05 22:12:15 crc kubenswrapper[4910]: I0105 22:12:15.267964 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.26794206 podStartE2EDuration="3.26794206s" podCreationTimestamp="2026-01-05 22:12:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:12:15.267614172 +0000 UTC m=+1266.845111842" watchObservedRunningTime="2026-01-05 22:12:15.26794206 +0000 UTC m=+1266.845439730" Jan 05 22:12:15 crc kubenswrapper[4910]: I0105 22:12:15.489438 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:12:15 crc kubenswrapper[4910]: I0105 22:12:15.553385 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/neutron-5c84f4b854-d9fkv"] Jan 05 22:12:15 crc kubenswrapper[4910]: I0105 22:12:15.553689 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5c84f4b854-d9fkv" podUID="07d78534-34ad-40ac-963e-605d72b91c82" containerName="neutron-api" containerID="cri-o://ea7d4a42975da37a49580a32164311257ae5b524f07e7d50b91f1491db9df89f" gracePeriod=30 Jan 05 22:12:15 crc kubenswrapper[4910]: I0105 22:12:15.553806 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5c84f4b854-d9fkv" podUID="07d78534-34ad-40ac-963e-605d72b91c82" containerName="neutron-httpd" containerID="cri-o://973f13fb83ce05be7e926666b1043d1de211a5830b1ca48cc71cad4a7b2cb684" gracePeriod=30 Jan 05 22:12:15 crc kubenswrapper[4910]: I0105 22:12:15.604052 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:16 crc kubenswrapper[4910]: I0105 22:12:16.231341 4910 generic.go:334] "Generic (PLEG): container finished" podID="07d78534-34ad-40ac-963e-605d72b91c82" containerID="973f13fb83ce05be7e926666b1043d1de211a5830b1ca48cc71cad4a7b2cb684" exitCode=0 Jan 05 22:12:16 crc kubenswrapper[4910]: I0105 22:12:16.231399 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c84f4b854-d9fkv" event={"ID":"07d78534-34ad-40ac-963e-605d72b91c82","Type":"ContainerDied","Data":"973f13fb83ce05be7e926666b1043d1de211a5830b1ca48cc71cad4a7b2cb684"} Jan 05 22:12:17 crc kubenswrapper[4910]: I0105 22:12:17.375938 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:12:17 crc kubenswrapper[4910]: I0105 22:12:17.379813 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:12:17 crc kubenswrapper[4910]: I0105 22:12:17.746284 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:12:17 crc kubenswrapper[4910]: I0105 22:12:17.838345 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-5lz66"] Jan 05 22:12:17 crc kubenswrapper[4910]: I0105 22:12:17.838839 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" podUID="b4174802-d96e-4046-8155-b22de9fa615f" containerName="dnsmasq-dns" containerID="cri-o://4166741f153128dd443f3316d4b42e92c05a8452c6b7fa423468d7f4f83e62f3" gracePeriod=10 Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.099312 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.193476 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.253822 4910 generic.go:334] "Generic (PLEG): container finished" podID="b4174802-d96e-4046-8155-b22de9fa615f" containerID="4166741f153128dd443f3316d4b42e92c05a8452c6b7fa423468d7f4f83e62f3" exitCode=0 Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.254505 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="6e9522ec-d5e3-484e-ac80-334021493bb9" containerName="cinder-scheduler" containerID="cri-o://ae0e4f6f3d169570f87594f0c3977324407fc123cc128d0b893f011641a0e1ca" gracePeriod=30 Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 
22:12:18.256447 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="6e9522ec-d5e3-484e-ac80-334021493bb9" containerName="probe" containerID="cri-o://315a5f9c520303843cc8ccecd702c7bbdbefb5eff71d5e8495a1344958c30556" gracePeriod=30 Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.256743 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" event={"ID":"b4174802-d96e-4046-8155-b22de9fa615f","Type":"ContainerDied","Data":"4166741f153128dd443f3316d4b42e92c05a8452c6b7fa423468d7f4f83e62f3"} Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.442591 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.628894 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qwlh\" (UniqueName: \"kubernetes.io/projected/b4174802-d96e-4046-8155-b22de9fa615f-kube-api-access-9qwlh\") pod \"b4174802-d96e-4046-8155-b22de9fa615f\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.628980 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-ovsdbserver-nb\") pod \"b4174802-d96e-4046-8155-b22de9fa615f\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.629011 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-config\") pod \"b4174802-d96e-4046-8155-b22de9fa615f\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.629181 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-dns-swift-storage-0\") pod \"b4174802-d96e-4046-8155-b22de9fa615f\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.629242 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-dns-svc\") pod \"b4174802-d96e-4046-8155-b22de9fa615f\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.629359 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-ovsdbserver-sb\") pod \"b4174802-d96e-4046-8155-b22de9fa615f\" (UID: \"b4174802-d96e-4046-8155-b22de9fa615f\") " Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.664176 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4174802-d96e-4046-8155-b22de9fa615f-kube-api-access-9qwlh" (OuterVolumeSpecName: "kube-api-access-9qwlh") pod "b4174802-d96e-4046-8155-b22de9fa615f" (UID: "b4174802-d96e-4046-8155-b22de9fa615f"). InnerVolumeSpecName "kube-api-access-9qwlh". 
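
As a sanity check on the pod_startup_latency_tracker entry for openstack/ceilometer-0 a few entries above: podStartSLOduration is the end-to-end startup time minus the image-pull window. A minimal Go sketch using the timestamps copied from the log; the subtraction is inferred from the logged numbers, not lifted from kubelet source.

package main

import (
	"fmt"
	"time"
)

// Timestamps copied from the ceilometer-0 startup-latency log entry.
// The formula (SLO = e2e minus the image-pull window) is inferred from
// the logged values.
func main() {
	const layout = "2006-01-02 15:04:05 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2026-01-05 22:12:07 +0000 UTC")
	firstPull := parse("2026-01-05 22:12:08.977736199 +0000 UTC")
	lastPull := parse("2026-01-05 22:12:12.64957092 +0000 UTC")
	running := parse("2026-01-05 22:12:13.216916947 +0000 UTC")

	e2e := running.Sub(created)          // 6.216916947s (podStartE2EDuration)
	slo := e2e - lastPull.Sub(firstPull) // 2.545082226s (podStartSLOduration)
	fmt.Println("e2e:", e2e, "slo:", slo)
}

Running this reproduces the logged values exactly (6.216916947s and 2.545082226s), which also explains the zero-valued pull timestamps on cinder-api-0 below: no pull window means SLO equals e2e.
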
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.676009 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b4174802-d96e-4046-8155-b22de9fa615f" (UID: "b4174802-d96e-4046-8155-b22de9fa615f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.682797 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-config" (OuterVolumeSpecName: "config") pod "b4174802-d96e-4046-8155-b22de9fa615f" (UID: "b4174802-d96e-4046-8155-b22de9fa615f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.693822 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b4174802-d96e-4046-8155-b22de9fa615f" (UID: "b4174802-d96e-4046-8155-b22de9fa615f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.703990 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b4174802-d96e-4046-8155-b22de9fa615f" (UID: "b4174802-d96e-4046-8155-b22de9fa615f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.721192 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b4174802-d96e-4046-8155-b22de9fa615f" (UID: "b4174802-d96e-4046-8155-b22de9fa615f"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.732790 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.732833 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qwlh\" (UniqueName: \"kubernetes.io/projected/b4174802-d96e-4046-8155-b22de9fa615f-kube-api-access-9qwlh\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.732853 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.732868 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.733110 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.733235 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4174802-d96e-4046-8155-b22de9fa615f-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.880099 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.911305 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.937527 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-combined-ca-bundle\") pod \"07d78534-34ad-40ac-963e-605d72b91c82\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.937631 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kd4xf\" (UniqueName: \"kubernetes.io/projected/07d78534-34ad-40ac-963e-605d72b91c82-kube-api-access-kd4xf\") pod \"07d78534-34ad-40ac-963e-605d72b91c82\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.937730 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-config\") pod \"07d78534-34ad-40ac-963e-605d72b91c82\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.937773 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-httpd-config\") pod \"07d78534-34ad-40ac-963e-605d72b91c82\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.937796 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-ovndb-tls-certs\") pod \"07d78534-34ad-40ac-963e-605d72b91c82\" (UID: \"07d78534-34ad-40ac-963e-605d72b91c82\") " Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.946131 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "07d78534-34ad-40ac-963e-605d72b91c82" (UID: "07d78534-34ad-40ac-963e-605d72b91c82"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.962419 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07d78534-34ad-40ac-963e-605d72b91c82-kube-api-access-kd4xf" (OuterVolumeSpecName: "kube-api-access-kd4xf") pod "07d78534-34ad-40ac-963e-605d72b91c82" (UID: "07d78534-34ad-40ac-963e-605d72b91c82"). InnerVolumeSpecName "kube-api-access-kd4xf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:18 crc kubenswrapper[4910]: I0105 22:12:18.963348 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.043234 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "07d78534-34ad-40ac-963e-605d72b91c82" (UID: "07d78534-34ad-40ac-963e-605d72b91c82"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.043532 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.043563 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.043575 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kd4xf\" (UniqueName: \"kubernetes.io/projected/07d78534-34ad-40ac-963e-605d72b91c82-kube-api-access-kd4xf\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.059030 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-69bd4bbbcd-dc6q7"] Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.059495 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" podUID="b9143cb8-ef34-42b4-b056-ea869bd675b7" containerName="barbican-api-log" containerID="cri-o://f1469fc930fac9bbb9f9a5fc7a9e0301d0114b166e3cae644d06b17f2cd866f2" gracePeriod=30 Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.062725 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" podUID="b9143cb8-ef34-42b4-b056-ea869bd675b7" containerName="barbican-api" containerID="cri-o://af446d955a631a963b7cdf7ecdd35907c80360de99632f3340cd2d4689dfc5cf" gracePeriod=30 Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.107916 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-config" (OuterVolumeSpecName: "config") pod "07d78534-34ad-40ac-963e-605d72b91c82" (UID: "07d78534-34ad-40ac-963e-605d72b91c82"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.114317 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "07d78534-34ad-40ac-963e-605d72b91c82" (UID: "07d78534-34ad-40ac-963e-605d72b91c82"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.148247 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.148525 4910 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/07d78534-34ad-40ac-963e-605d72b91c82-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.271942 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" event={"ID":"b4174802-d96e-4046-8155-b22de9fa615f","Type":"ContainerDied","Data":"cd9d9b719a1ff22f45f64ecf4babd7d6b3f568a59c29630240af7004f3e00e71"} Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.272301 4910 scope.go:117] "RemoveContainer" containerID="4166741f153128dd443f3316d4b42e92c05a8452c6b7fa423468d7f4f83e62f3" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.272757 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bb67c87c9-5lz66" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.277782 4910 generic.go:334] "Generic (PLEG): container finished" podID="07d78534-34ad-40ac-963e-605d72b91c82" containerID="ea7d4a42975da37a49580a32164311257ae5b524f07e7d50b91f1491db9df89f" exitCode=0 Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.277908 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c84f4b854-d9fkv" event={"ID":"07d78534-34ad-40ac-963e-605d72b91c82","Type":"ContainerDied","Data":"ea7d4a42975da37a49580a32164311257ae5b524f07e7d50b91f1491db9df89f"} Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.277855 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5c84f4b854-d9fkv" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.278504 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c84f4b854-d9fkv" event={"ID":"07d78534-34ad-40ac-963e-605d72b91c82","Type":"ContainerDied","Data":"05ee7aff1e282fd692216dff37c458555df5a52be809c14f544c92b8b3ae9cf0"} Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.286386 4910 generic.go:334] "Generic (PLEG): container finished" podID="6e9522ec-d5e3-484e-ac80-334021493bb9" containerID="315a5f9c520303843cc8ccecd702c7bbdbefb5eff71d5e8495a1344958c30556" exitCode=0 Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.286454 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6e9522ec-d5e3-484e-ac80-334021493bb9","Type":"ContainerDied","Data":"315a5f9c520303843cc8ccecd702c7bbdbefb5eff71d5e8495a1344958c30556"} Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.289183 4910 generic.go:334] "Generic (PLEG): container finished" podID="b9143cb8-ef34-42b4-b056-ea869bd675b7" containerID="f1469fc930fac9bbb9f9a5fc7a9e0301d0114b166e3cae644d06b17f2cd866f2" exitCode=143 Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.289240 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" event={"ID":"b9143cb8-ef34-42b4-b056-ea869bd675b7","Type":"ContainerDied","Data":"f1469fc930fac9bbb9f9a5fc7a9e0301d0114b166e3cae644d06b17f2cd866f2"} Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.305417 4910 scope.go:117] "RemoveContainer" containerID="f57bdcc6c7bf04d10da5300531991fee00308faf910cbfac9ee7f4252d9907dc" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.309102 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-5lz66"] Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.333333 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-5lz66"] Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.337451 4910 scope.go:117] "RemoveContainer" containerID="973f13fb83ce05be7e926666b1043d1de211a5830b1ca48cc71cad4a7b2cb684" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.344588 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5c84f4b854-d9fkv"] Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.355314 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5c84f4b854-d9fkv"] Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.390311 4910 scope.go:117] "RemoveContainer" containerID="ea7d4a42975da37a49580a32164311257ae5b524f07e7d50b91f1491db9df89f" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.428320 4910 scope.go:117] "RemoveContainer" containerID="973f13fb83ce05be7e926666b1043d1de211a5830b1ca48cc71cad4a7b2cb684" Jan 05 22:12:19 crc kubenswrapper[4910]: E0105 22:12:19.428860 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"973f13fb83ce05be7e926666b1043d1de211a5830b1ca48cc71cad4a7b2cb684\": container with ID starting with 973f13fb83ce05be7e926666b1043d1de211a5830b1ca48cc71cad4a7b2cb684 not found: ID does not exist" containerID="973f13fb83ce05be7e926666b1043d1de211a5830b1ca48cc71cad4a7b2cb684" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.428925 4910 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"973f13fb83ce05be7e926666b1043d1de211a5830b1ca48cc71cad4a7b2cb684"} err="failed to get container status \"973f13fb83ce05be7e926666b1043d1de211a5830b1ca48cc71cad4a7b2cb684\": rpc error: code = NotFound desc = could not find container \"973f13fb83ce05be7e926666b1043d1de211a5830b1ca48cc71cad4a7b2cb684\": container with ID starting with 973f13fb83ce05be7e926666b1043d1de211a5830b1ca48cc71cad4a7b2cb684 not found: ID does not exist" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.428969 4910 scope.go:117] "RemoveContainer" containerID="ea7d4a42975da37a49580a32164311257ae5b524f07e7d50b91f1491db9df89f" Jan 05 22:12:19 crc kubenswrapper[4910]: E0105 22:12:19.429340 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea7d4a42975da37a49580a32164311257ae5b524f07e7d50b91f1491db9df89f\": container with ID starting with ea7d4a42975da37a49580a32164311257ae5b524f07e7d50b91f1491db9df89f not found: ID does not exist" containerID="ea7d4a42975da37a49580a32164311257ae5b524f07e7d50b91f1491db9df89f" Jan 05 22:12:19 crc kubenswrapper[4910]: I0105 22:12:19.429396 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea7d4a42975da37a49580a32164311257ae5b524f07e7d50b91f1491db9df89f"} err="failed to get container status \"ea7d4a42975da37a49580a32164311257ae5b524f07e7d50b91f1491db9df89f\": rpc error: code = NotFound desc = could not find container \"ea7d4a42975da37a49580a32164311257ae5b524f07e7d50b91f1491db9df89f\": container with ID starting with ea7d4a42975da37a49580a32164311257ae5b524f07e7d50b91f1491db9df89f not found: ID does not exist" Jan 05 22:12:20 crc kubenswrapper[4910]: I0105 22:12:20.732233 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07d78534-34ad-40ac-963e-605d72b91c82" path="/var/lib/kubelet/pods/07d78534-34ad-40ac-963e-605d72b91c82/volumes" Jan 05 22:12:20 crc kubenswrapper[4910]: I0105 22:12:20.733277 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4174802-d96e-4046-8155-b22de9fa615f" path="/var/lib/kubelet/pods/b4174802-d96e-4046-8155-b22de9fa615f/volumes" Jan 05 22:12:21 crc kubenswrapper[4910]: I0105 22:12:21.104051 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7bbfdb8fcf-zlpw8" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.351684 4910 generic.go:334] "Generic (PLEG): container finished" podID="b9143cb8-ef34-42b4-b056-ea869bd675b7" containerID="af446d955a631a963b7cdf7ecdd35907c80360de99632f3340cd2d4689dfc5cf" exitCode=0 Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.351754 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" event={"ID":"b9143cb8-ef34-42b4-b056-ea869bd675b7","Type":"ContainerDied","Data":"af446d955a631a963b7cdf7ecdd35907c80360de99632f3340cd2d4689dfc5cf"} Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.687334 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.719156 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dschh\" (UniqueName: \"kubernetes.io/projected/b9143cb8-ef34-42b4-b056-ea869bd675b7-kube-api-access-dschh\") pod \"b9143cb8-ef34-42b4-b056-ea869bd675b7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.719469 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9143cb8-ef34-42b4-b056-ea869bd675b7-logs\") pod \"b9143cb8-ef34-42b4-b056-ea869bd675b7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.719647 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-config-data\") pod \"b9143cb8-ef34-42b4-b056-ea869bd675b7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.720030 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9143cb8-ef34-42b4-b056-ea869bd675b7-logs" (OuterVolumeSpecName: "logs") pod "b9143cb8-ef34-42b4-b056-ea869bd675b7" (UID: "b9143cb8-ef34-42b4-b056-ea869bd675b7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.720138 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-combined-ca-bundle\") pod \"b9143cb8-ef34-42b4-b056-ea869bd675b7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.720225 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-config-data-custom\") pod \"b9143cb8-ef34-42b4-b056-ea869bd675b7\" (UID: \"b9143cb8-ef34-42b4-b056-ea869bd675b7\") " Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.721554 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9143cb8-ef34-42b4-b056-ea869bd675b7-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.726280 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9143cb8-ef34-42b4-b056-ea869bd675b7-kube-api-access-dschh" (OuterVolumeSpecName: "kube-api-access-dschh") pod "b9143cb8-ef34-42b4-b056-ea869bd675b7" (UID: "b9143cb8-ef34-42b4-b056-ea869bd675b7"). InnerVolumeSpecName "kube-api-access-dschh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.738249 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b9143cb8-ef34-42b4-b056-ea869bd675b7" (UID: "b9143cb8-ef34-42b4-b056-ea869bd675b7"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.764209 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9143cb8-ef34-42b4-b056-ea869bd675b7" (UID: "b9143cb8-ef34-42b4-b056-ea869bd675b7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.804288 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-config-data" (OuterVolumeSpecName: "config-data") pod "b9143cb8-ef34-42b4-b056-ea869bd675b7" (UID: "b9143cb8-ef34-42b4-b056-ea869bd675b7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.825297 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.825336 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.825347 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b9143cb8-ef34-42b4-b056-ea869bd675b7-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.825357 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dschh\" (UniqueName: \"kubernetes.io/projected/b9143cb8-ef34-42b4-b056-ea869bd675b7-kube-api-access-dschh\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.895888 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.926453 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-scripts\") pod \"6e9522ec-d5e3-484e-ac80-334021493bb9\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.926612 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdgng\" (UniqueName: \"kubernetes.io/projected/6e9522ec-d5e3-484e-ac80-334021493bb9-kube-api-access-hdgng\") pod \"6e9522ec-d5e3-484e-ac80-334021493bb9\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.926710 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6e9522ec-d5e3-484e-ac80-334021493bb9-etc-machine-id\") pod \"6e9522ec-d5e3-484e-ac80-334021493bb9\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.926792 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6e9522ec-d5e3-484e-ac80-334021493bb9-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "6e9522ec-d5e3-484e-ac80-334021493bb9" (UID: "6e9522ec-d5e3-484e-ac80-334021493bb9"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.926883 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-config-data\") pod \"6e9522ec-d5e3-484e-ac80-334021493bb9\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.927042 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-combined-ca-bundle\") pod \"6e9522ec-d5e3-484e-ac80-334021493bb9\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.927510 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-config-data-custom\") pod \"6e9522ec-d5e3-484e-ac80-334021493bb9\" (UID: \"6e9522ec-d5e3-484e-ac80-334021493bb9\") " Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.927971 4910 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6e9522ec-d5e3-484e-ac80-334021493bb9-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.934373 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-scripts" (OuterVolumeSpecName: "scripts") pod "6e9522ec-d5e3-484e-ac80-334021493bb9" (UID: "6e9522ec-d5e3-484e-ac80-334021493bb9"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.939307 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6e9522ec-d5e3-484e-ac80-334021493bb9" (UID: "6e9522ec-d5e3-484e-ac80-334021493bb9"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:22 crc kubenswrapper[4910]: I0105 22:12:22.941490 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e9522ec-d5e3-484e-ac80-334021493bb9-kube-api-access-hdgng" (OuterVolumeSpecName: "kube-api-access-hdgng") pod "6e9522ec-d5e3-484e-ac80-334021493bb9" (UID: "6e9522ec-d5e3-484e-ac80-334021493bb9"). InnerVolumeSpecName "kube-api-access-hdgng". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.012759 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6e9522ec-d5e3-484e-ac80-334021493bb9" (UID: "6e9522ec-d5e3-484e-ac80-334021493bb9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.030381 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.030415 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdgng\" (UniqueName: \"kubernetes.io/projected/6e9522ec-d5e3-484e-ac80-334021493bb9-kube-api-access-hdgng\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.030426 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.030437 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.052412 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-config-data" (OuterVolumeSpecName: "config-data") pod "6e9522ec-d5e3-484e-ac80-334021493bb9" (UID: "6e9522ec-d5e3-484e-ac80-334021493bb9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.131657 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e9522ec-d5e3-484e-ac80-334021493bb9-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.361941 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" event={"ID":"b9143cb8-ef34-42b4-b056-ea869bd675b7","Type":"ContainerDied","Data":"652f042e30d65efb5115b879a66a1912016f7403fe17dcb3b09a017b1ccbc6c0"} Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.361973 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-69bd4bbbcd-dc6q7" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.362344 4910 scope.go:117] "RemoveContainer" containerID="af446d955a631a963b7cdf7ecdd35907c80360de99632f3340cd2d4689dfc5cf" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.364543 4910 generic.go:334] "Generic (PLEG): container finished" podID="6e9522ec-d5e3-484e-ac80-334021493bb9" containerID="ae0e4f6f3d169570f87594f0c3977324407fc123cc128d0b893f011641a0e1ca" exitCode=0 Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.364610 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6e9522ec-d5e3-484e-ac80-334021493bb9","Type":"ContainerDied","Data":"ae0e4f6f3d169570f87594f0c3977324407fc123cc128d0b893f011641a0e1ca"} Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.364643 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6e9522ec-d5e3-484e-ac80-334021493bb9","Type":"ContainerDied","Data":"9dd02f25375004abe1c1d22368fb50e08b4ade3dc4d18bacb13b5d4853ea31b1"} Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.364731 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.385406 4910 scope.go:117] "RemoveContainer" containerID="f1469fc930fac9bbb9f9a5fc7a9e0301d0114b166e3cae644d06b17f2cd866f2" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.433452 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-69bd4bbbcd-dc6q7"] Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.441332 4910 scope.go:117] "RemoveContainer" containerID="315a5f9c520303843cc8ccecd702c7bbdbefb5eff71d5e8495a1344958c30556" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.444160 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-69bd4bbbcd-dc6q7"] Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.457368 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.473530 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.482067 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 22:12:23 crc kubenswrapper[4910]: E0105 22:12:23.482929 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e9522ec-d5e3-484e-ac80-334021493bb9" containerName="cinder-scheduler" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.483036 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e9522ec-d5e3-484e-ac80-334021493bb9" containerName="cinder-scheduler" Jan 05 22:12:23 crc kubenswrapper[4910]: E0105 22:12:23.483177 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e9522ec-d5e3-484e-ac80-334021493bb9" containerName="probe" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.483274 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e9522ec-d5e3-484e-ac80-334021493bb9" containerName="probe" Jan 05 22:12:23 crc kubenswrapper[4910]: E0105 22:12:23.483359 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9143cb8-ef34-42b4-b056-ea869bd675b7" containerName="barbican-api" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.483450 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9143cb8-ef34-42b4-b056-ea869bd675b7" containerName="barbican-api" Jan 05 22:12:23 crc kubenswrapper[4910]: E0105 22:12:23.483525 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4174802-d96e-4046-8155-b22de9fa615f" containerName="dnsmasq-dns" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.483599 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4174802-d96e-4046-8155-b22de9fa615f" containerName="dnsmasq-dns" Jan 05 22:12:23 crc kubenswrapper[4910]: E0105 22:12:23.483683 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4174802-d96e-4046-8155-b22de9fa615f" containerName="init" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.483754 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4174802-d96e-4046-8155-b22de9fa615f" containerName="init" Jan 05 22:12:23 crc kubenswrapper[4910]: E0105 22:12:23.483845 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9143cb8-ef34-42b4-b056-ea869bd675b7" containerName="barbican-api-log" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.483931 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9143cb8-ef34-42b4-b056-ea869bd675b7" containerName="barbican-api-log" Jan 05 22:12:23 
crc kubenswrapper[4910]: E0105 22:12:23.484011 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07d78534-34ad-40ac-963e-605d72b91c82" containerName="neutron-httpd" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.484083 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="07d78534-34ad-40ac-963e-605d72b91c82" containerName="neutron-httpd" Jan 05 22:12:23 crc kubenswrapper[4910]: E0105 22:12:23.484192 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07d78534-34ad-40ac-963e-605d72b91c82" containerName="neutron-api" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.484273 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="07d78534-34ad-40ac-963e-605d72b91c82" containerName="neutron-api" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.484615 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e9522ec-d5e3-484e-ac80-334021493bb9" containerName="probe" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.484718 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e9522ec-d5e3-484e-ac80-334021493bb9" containerName="cinder-scheduler" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.484807 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="07d78534-34ad-40ac-963e-605d72b91c82" containerName="neutron-api" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.484890 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9143cb8-ef34-42b4-b056-ea869bd675b7" containerName="barbican-api" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.484961 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4174802-d96e-4046-8155-b22de9fa615f" containerName="dnsmasq-dns" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.485044 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9143cb8-ef34-42b4-b056-ea869bd675b7" containerName="barbican-api-log" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.485139 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="07d78534-34ad-40ac-963e-605d72b91c82" containerName="neutron-httpd" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.487518 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.490799 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.491063 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.504296 4910 scope.go:117] "RemoveContainer" containerID="ae0e4f6f3d169570f87594f0c3977324407fc123cc128d0b893f011641a0e1ca" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.540764 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.540825 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsm8w\" (UniqueName: \"kubernetes.io/projected/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-kube-api-access-wsm8w\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.540859 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.540922 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.540994 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-config-data\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.541055 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-scripts\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.551828 4910 scope.go:117] "RemoveContainer" containerID="315a5f9c520303843cc8ccecd702c7bbdbefb5eff71d5e8495a1344958c30556" Jan 05 22:12:23 crc kubenswrapper[4910]: E0105 22:12:23.552592 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"315a5f9c520303843cc8ccecd702c7bbdbefb5eff71d5e8495a1344958c30556\": container with ID starting with 315a5f9c520303843cc8ccecd702c7bbdbefb5eff71d5e8495a1344958c30556 not found: ID does not exist" containerID="315a5f9c520303843cc8ccecd702c7bbdbefb5eff71d5e8495a1344958c30556" Jan 05 22:12:23 
crc kubenswrapper[4910]: I0105 22:12:23.552721 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"315a5f9c520303843cc8ccecd702c7bbdbefb5eff71d5e8495a1344958c30556"} err="failed to get container status \"315a5f9c520303843cc8ccecd702c7bbdbefb5eff71d5e8495a1344958c30556\": rpc error: code = NotFound desc = could not find container \"315a5f9c520303843cc8ccecd702c7bbdbefb5eff71d5e8495a1344958c30556\": container with ID starting with 315a5f9c520303843cc8ccecd702c7bbdbefb5eff71d5e8495a1344958c30556 not found: ID does not exist" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.552817 4910 scope.go:117] "RemoveContainer" containerID="ae0e4f6f3d169570f87594f0c3977324407fc123cc128d0b893f011641a0e1ca" Jan 05 22:12:23 crc kubenswrapper[4910]: E0105 22:12:23.553110 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae0e4f6f3d169570f87594f0c3977324407fc123cc128d0b893f011641a0e1ca\": container with ID starting with ae0e4f6f3d169570f87594f0c3977324407fc123cc128d0b893f011641a0e1ca not found: ID does not exist" containerID="ae0e4f6f3d169570f87594f0c3977324407fc123cc128d0b893f011641a0e1ca" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.553227 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae0e4f6f3d169570f87594f0c3977324407fc123cc128d0b893f011641a0e1ca"} err="failed to get container status \"ae0e4f6f3d169570f87594f0c3977324407fc123cc128d0b893f011641a0e1ca\": rpc error: code = NotFound desc = could not find container \"ae0e4f6f3d169570f87594f0c3977324407fc123cc128d0b893f011641a0e1ca\": container with ID starting with ae0e4f6f3d169570f87594f0c3977324407fc123cc128d0b893f011641a0e1ca not found: ID does not exist" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.603371 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.605717 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.609292 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.609365 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-rsbc7" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.611163 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.643562 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-combined-ca-bundle\") pod \"openstackclient\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") " pod="openstack/openstackclient" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.643641 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-openstack-config-secret\") pod \"openstackclient\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") " pod="openstack/openstackclient" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.643688 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-config-data\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.643719 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-openstack-config\") pod \"openstackclient\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") " pod="openstack/openstackclient" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.644064 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-scripts\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.644376 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bg4z\" (UniqueName: \"kubernetes.io/projected/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-kube-api-access-4bg4z\") pod \"openstackclient\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") " pod="openstack/openstackclient" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.644440 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.644516 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsm8w\" (UniqueName: \"kubernetes.io/projected/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-kube-api-access-wsm8w\") pod \"cinder-scheduler-0\" (UID: 
\"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.644568 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.644571 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.645292 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.647495 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.651827 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-scripts\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.655879 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.656863 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-config-data\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.664152 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsm8w\" (UniqueName: \"kubernetes.io/projected/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-kube-api-access-wsm8w\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.664558 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " pod="openstack/cinder-scheduler-0" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.747478 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-combined-ca-bundle\") pod \"openstackclient\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") " pod="openstack/openstackclient" Jan 05 22:12:23 crc kubenswrapper[4910]: 
I0105 22:12:23.747752 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-openstack-config-secret\") pod \"openstackclient\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") " pod="openstack/openstackclient" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.747842 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-openstack-config\") pod \"openstackclient\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") " pod="openstack/openstackclient" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.748040 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bg4z\" (UniqueName: \"kubernetes.io/projected/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-kube-api-access-4bg4z\") pod \"openstackclient\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") " pod="openstack/openstackclient" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.749763 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-openstack-config\") pod \"openstackclient\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") " pod="openstack/openstackclient" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.754534 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-openstack-config-secret\") pod \"openstackclient\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") " pod="openstack/openstackclient" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.755646 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-combined-ca-bundle\") pod \"openstackclient\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") " pod="openstack/openstackclient" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.772938 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bg4z\" (UniqueName: \"kubernetes.io/projected/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-kube-api-access-4bg4z\") pod \"openstackclient\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") " pod="openstack/openstackclient" Jan 05 22:12:23 crc kubenswrapper[4910]: I0105 22:12:23.837014 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 22:12:24 crc kubenswrapper[4910]: I0105 22:12:24.031391 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 05 22:12:24 crc kubenswrapper[4910]: I0105 22:12:24.330027 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 22:12:24 crc kubenswrapper[4910]: W0105 22:12:24.352708 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19d63cd6_26c3_439b_a9f6_5a53f27d9e0e.slice/crio-318b85531003e11d9a7b6e74e370ab105779b4933140d0bdb6ccec5e465ce41c WatchSource:0}: Error finding container 318b85531003e11d9a7b6e74e370ab105779b4933140d0bdb6ccec5e465ce41c: Status 404 returned error can't find the container with id 318b85531003e11d9a7b6e74e370ab105779b4933140d0bdb6ccec5e465ce41c Jan 05 22:12:24 crc kubenswrapper[4910]: I0105 22:12:24.408022 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e","Type":"ContainerStarted","Data":"318b85531003e11d9a7b6e74e370ab105779b4933140d0bdb6ccec5e465ce41c"} Jan 05 22:12:24 crc kubenswrapper[4910]: I0105 22:12:24.508735 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 05 22:12:24 crc kubenswrapper[4910]: W0105 22:12:24.520397 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9e934b0f_784d_4e7b_8c78_aa5d7e35aa0a.slice/crio-4b20a2e422f68a24b81535b924153e253cd13638a90805647728a1a6b65288d3 WatchSource:0}: Error finding container 4b20a2e422f68a24b81535b924153e253cd13638a90805647728a1a6b65288d3: Status 404 returned error can't find the container with id 4b20a2e422f68a24b81535b924153e253cd13638a90805647728a1a6b65288d3 Jan 05 22:12:24 crc kubenswrapper[4910]: I0105 22:12:24.735096 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e9522ec-d5e3-484e-ac80-334021493bb9" path="/var/lib/kubelet/pods/6e9522ec-d5e3-484e-ac80-334021493bb9/volumes" Jan 05 22:12:24 crc kubenswrapper[4910]: I0105 22:12:24.736956 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9143cb8-ef34-42b4-b056-ea869bd675b7" path="/var/lib/kubelet/pods/b9143cb8-ef34-42b4-b056-ea869bd675b7/volumes" Jan 05 22:12:25 crc kubenswrapper[4910]: I0105 22:12:25.064980 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 05 22:12:25 crc kubenswrapper[4910]: I0105 22:12:25.423076 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a","Type":"ContainerStarted","Data":"4b20a2e422f68a24b81535b924153e253cd13638a90805647728a1a6b65288d3"} Jan 05 22:12:25 crc kubenswrapper[4910]: I0105 22:12:25.428692 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e","Type":"ContainerStarted","Data":"aa48fa221aab1fca8baf355d7d5b238e363506882c0807675e58c0556680cf81"} Jan 05 22:12:26 crc kubenswrapper[4910]: I0105 22:12:26.440762 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e","Type":"ContainerStarted","Data":"e56637041d9755fd6fda8b6ee2207de4c4a054e4001e101db38b784bf6a8eb7a"} Jan 05 22:12:26 crc kubenswrapper[4910]: I0105 22:12:26.469500 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.469479803 
podStartE2EDuration="3.469479803s" podCreationTimestamp="2026-01-05 22:12:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:12:26.467262387 +0000 UTC m=+1278.044760057" watchObservedRunningTime="2026-01-05 22:12:26.469479803 +0000 UTC m=+1278.046977473" Jan 05 22:12:28 crc kubenswrapper[4910]: I0105 22:12:28.837791 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.459714 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-b7b888cd9-zwrvg"] Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.461338 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.463649 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.463928 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.464065 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.481729 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-b7b888cd9-zwrvg"] Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.530754 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-config-data\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.530835 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-internal-tls-certs\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.530987 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24f2eef4-3eac-4643-bffa-0747afae172a-run-httpd\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.531014 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6p9tt\" (UniqueName: \"kubernetes.io/projected/24f2eef4-3eac-4643-bffa-0747afae172a-kube-api-access-6p9tt\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.531073 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/24f2eef4-3eac-4643-bffa-0747afae172a-etc-swift\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " 
pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.531196 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24f2eef4-3eac-4643-bffa-0747afae172a-log-httpd\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.531258 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-public-tls-certs\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.532063 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-combined-ca-bundle\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.635219 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24f2eef4-3eac-4643-bffa-0747afae172a-run-httpd\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.635284 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6p9tt\" (UniqueName: \"kubernetes.io/projected/24f2eef4-3eac-4643-bffa-0747afae172a-kube-api-access-6p9tt\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.635337 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/24f2eef4-3eac-4643-bffa-0747afae172a-etc-swift\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.635369 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24f2eef4-3eac-4643-bffa-0747afae172a-log-httpd\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.635393 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-public-tls-certs\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.635426 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-combined-ca-bundle\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " 
pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.635465 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-config-data\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.635509 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-internal-tls-certs\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.635814 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24f2eef4-3eac-4643-bffa-0747afae172a-run-httpd\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.636152 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24f2eef4-3eac-4643-bffa-0747afae172a-log-httpd\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.644218 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-public-tls-certs\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.644224 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-combined-ca-bundle\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.645470 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-internal-tls-certs\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.649596 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-config-data\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.653173 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/24f2eef4-3eac-4643-bffa-0747afae172a-etc-swift\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.654147 4910 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-6p9tt\" (UniqueName: \"kubernetes.io/projected/24f2eef4-3eac-4643-bffa-0747afae172a-kube-api-access-6p9tt\") pod \"swift-proxy-b7b888cd9-zwrvg\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:29 crc kubenswrapper[4910]: I0105 22:12:29.779627 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:34 crc kubenswrapper[4910]: I0105 22:12:34.148758 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 05 22:12:34 crc kubenswrapper[4910]: I0105 22:12:34.426984 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-b7b888cd9-zwrvg"] Jan 05 22:12:34 crc kubenswrapper[4910]: W0105 22:12:34.428349 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod24f2eef4_3eac_4643_bffa_0747afae172a.slice/crio-0ae3b996825a08cf127b86f2d77bfd5104866a9631526f454e3977b0c8e86d32 WatchSource:0}: Error finding container 0ae3b996825a08cf127b86f2d77bfd5104866a9631526f454e3977b0c8e86d32: Status 404 returned error can't find the container with id 0ae3b996825a08cf127b86f2d77bfd5104866a9631526f454e3977b0c8e86d32 Jan 05 22:12:34 crc kubenswrapper[4910]: I0105 22:12:34.584428 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a","Type":"ContainerStarted","Data":"b0963465056d457d77ba82b41400764fc535aa116bd7004b3c5f6069bc02b174"} Jan 05 22:12:34 crc kubenswrapper[4910]: I0105 22:12:34.585995 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-b7b888cd9-zwrvg" event={"ID":"24f2eef4-3eac-4643-bffa-0747afae172a","Type":"ContainerStarted","Data":"0ae3b996825a08cf127b86f2d77bfd5104866a9631526f454e3977b0c8e86d32"} Jan 05 22:12:34 crc kubenswrapper[4910]: I0105 22:12:34.601051 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.2333226760000002 podStartE2EDuration="11.601027365s" podCreationTimestamp="2026-01-05 22:12:23 +0000 UTC" firstStartedPulling="2026-01-05 22:12:24.524869925 +0000 UTC m=+1276.102367595" lastFinishedPulling="2026-01-05 22:12:33.892574614 +0000 UTC m=+1285.470072284" observedRunningTime="2026-01-05 22:12:34.597282881 +0000 UTC m=+1286.174780551" watchObservedRunningTime="2026-01-05 22:12:34.601027365 +0000 UTC m=+1286.178525035" Jan 05 22:12:35 crc kubenswrapper[4910]: I0105 22:12:35.569596 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:35 crc kubenswrapper[4910]: I0105 22:12:35.570362 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="ceilometer-central-agent" containerID="cri-o://c60c8411f9c2399b8bae52ff71314c32f56a5a07355eed4bdebfb3ace1d5b675" gracePeriod=30 Jan 05 22:12:35 crc kubenswrapper[4910]: I0105 22:12:35.570419 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="proxy-httpd" containerID="cri-o://b5949571b11635d2d0c6aa2c54638023c2e8fa9a2a1bc6caa4e781c517195041" gracePeriod=30 Jan 05 22:12:35 crc kubenswrapper[4910]: I0105 22:12:35.570418 4910 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/ceilometer-0" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="sg-core" containerID="cri-o://92a0da346a6c3e2e2491fb688bd2124b3883a1389df5d37d28a3a0825bd343cf" gracePeriod=30 Jan 05 22:12:35 crc kubenswrapper[4910]: I0105 22:12:35.570519 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="ceilometer-notification-agent" containerID="cri-o://c9da7326419acb23c844811323e67f423734a62690a3263e5e16ff1268ccc0c4" gracePeriod=30 Jan 05 22:12:35 crc kubenswrapper[4910]: I0105 22:12:35.592415 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Jan 05 22:12:35 crc kubenswrapper[4910]: I0105 22:12:35.599561 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-b7b888cd9-zwrvg" event={"ID":"24f2eef4-3eac-4643-bffa-0747afae172a","Type":"ContainerStarted","Data":"228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30"} Jan 05 22:12:35 crc kubenswrapper[4910]: I0105 22:12:35.599610 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-b7b888cd9-zwrvg" event={"ID":"24f2eef4-3eac-4643-bffa-0747afae172a","Type":"ContainerStarted","Data":"484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9"} Jan 05 22:12:35 crc kubenswrapper[4910]: I0105 22:12:35.599642 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:35 crc kubenswrapper[4910]: I0105 22:12:35.599666 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:35 crc kubenswrapper[4910]: I0105 22:12:35.628162 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-b7b888cd9-zwrvg" podStartSLOduration=6.628137206 podStartE2EDuration="6.628137206s" podCreationTimestamp="2026-01-05 22:12:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:12:35.615621983 +0000 UTC m=+1287.193119683" watchObservedRunningTime="2026-01-05 22:12:35.628137206 +0000 UTC m=+1287.205634876" Jan 05 22:12:36 crc kubenswrapper[4910]: I0105 22:12:36.613932 4910 generic.go:334] "Generic (PLEG): container finished" podID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerID="b5949571b11635d2d0c6aa2c54638023c2e8fa9a2a1bc6caa4e781c517195041" exitCode=0 Jan 05 22:12:36 crc kubenswrapper[4910]: I0105 22:12:36.614502 4910 generic.go:334] "Generic (PLEG): container finished" podID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerID="92a0da346a6c3e2e2491fb688bd2124b3883a1389df5d37d28a3a0825bd343cf" exitCode=2 Jan 05 22:12:36 crc kubenswrapper[4910]: I0105 22:12:36.614532 4910 generic.go:334] "Generic (PLEG): container finished" podID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerID="c60c8411f9c2399b8bae52ff71314c32f56a5a07355eed4bdebfb3ace1d5b675" exitCode=0 Jan 05 22:12:36 crc kubenswrapper[4910]: I0105 22:12:36.613997 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d949f571-b2a3-43ba-8f4a-65d0a3eed74a","Type":"ContainerDied","Data":"b5949571b11635d2d0c6aa2c54638023c2e8fa9a2a1bc6caa4e781c517195041"} Jan 05 22:12:36 crc kubenswrapper[4910]: I0105 22:12:36.614641 4910 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d949f571-b2a3-43ba-8f4a-65d0a3eed74a","Type":"ContainerDied","Data":"92a0da346a6c3e2e2491fb688bd2124b3883a1389df5d37d28a3a0825bd343cf"} Jan 05 22:12:36 crc kubenswrapper[4910]: I0105 22:12:36.614685 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d949f571-b2a3-43ba-8f4a-65d0a3eed74a","Type":"ContainerDied","Data":"c60c8411f9c2399b8bae52ff71314c32f56a5a07355eed4bdebfb3ace1d5b675"} Jan 05 22:12:37 crc kubenswrapper[4910]: I0105 22:12:37.722699 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 22:12:37 crc kubenswrapper[4910]: I0105 22:12:37.723812 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="3a1e131f-00cf-4724-91e0-52d2766184d9" containerName="glance-log" containerID="cri-o://46d06fca16e6d7362ccaa51a4b5864275f1634fd78eaaef4118ec61bdc8a1f46" gracePeriod=30 Jan 05 22:12:37 crc kubenswrapper[4910]: I0105 22:12:37.723872 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="3a1e131f-00cf-4724-91e0-52d2766184d9" containerName="glance-httpd" containerID="cri-o://4bb93c94da9335681c7dcfe1904cc909f089d4f34d01f8611101a36972f9245d" gracePeriod=30 Jan 05 22:12:38 crc kubenswrapper[4910]: I0105 22:12:38.377503 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.163:3000/\": dial tcp 10.217.0.163:3000: connect: connection refused" Jan 05 22:12:38 crc kubenswrapper[4910]: I0105 22:12:38.636952 4910 generic.go:334] "Generic (PLEG): container finished" podID="3a1e131f-00cf-4724-91e0-52d2766184d9" containerID="46d06fca16e6d7362ccaa51a4b5864275f1634fd78eaaef4118ec61bdc8a1f46" exitCode=143 Jan 05 22:12:38 crc kubenswrapper[4910]: I0105 22:12:38.637157 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3a1e131f-00cf-4724-91e0-52d2766184d9","Type":"ContainerDied","Data":"46d06fca16e6d7362ccaa51a4b5864275f1634fd78eaaef4118ec61bdc8a1f46"} Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.113807 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.268744 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-scripts\") pod \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.268931 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-prgfr\" (UniqueName: \"kubernetes.io/projected/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-kube-api-access-prgfr\") pod \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.269137 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-log-httpd\") pod \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.269224 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-config-data\") pod \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.269304 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-combined-ca-bundle\") pod \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.269391 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-sg-core-conf-yaml\") pod \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.269434 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-run-httpd\") pod \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\" (UID: \"d949f571-b2a3-43ba-8f4a-65d0a3eed74a\") " Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.270583 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d949f571-b2a3-43ba-8f4a-65d0a3eed74a" (UID: "d949f571-b2a3-43ba-8f4a-65d0a3eed74a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.271486 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d949f571-b2a3-43ba-8f4a-65d0a3eed74a" (UID: "d949f571-b2a3-43ba-8f4a-65d0a3eed74a"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.278261 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-scripts" (OuterVolumeSpecName: "scripts") pod "d949f571-b2a3-43ba-8f4a-65d0a3eed74a" (UID: "d949f571-b2a3-43ba-8f4a-65d0a3eed74a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.278404 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-kube-api-access-prgfr" (OuterVolumeSpecName: "kube-api-access-prgfr") pod "d949f571-b2a3-43ba-8f4a-65d0a3eed74a" (UID: "d949f571-b2a3-43ba-8f4a-65d0a3eed74a"). InnerVolumeSpecName "kube-api-access-prgfr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.316337 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d949f571-b2a3-43ba-8f4a-65d0a3eed74a" (UID: "d949f571-b2a3-43ba-8f4a-65d0a3eed74a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.361676 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d949f571-b2a3-43ba-8f4a-65d0a3eed74a" (UID: "d949f571-b2a3-43ba-8f4a-65d0a3eed74a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.372629 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.372681 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.372696 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-prgfr\" (UniqueName: \"kubernetes.io/projected/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-kube-api-access-prgfr\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.372707 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.372723 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.373180 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.437256 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-config-data" (OuterVolumeSpecName: "config-data") pod "d949f571-b2a3-43ba-8f4a-65d0a3eed74a" (UID: "d949f571-b2a3-43ba-8f4a-65d0a3eed74a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.474866 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d949f571-b2a3-43ba-8f4a-65d0a3eed74a-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.651406 4910 generic.go:334] "Generic (PLEG): container finished" podID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerID="c9da7326419acb23c844811323e67f423734a62690a3263e5e16ff1268ccc0c4" exitCode=0 Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.651506 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d949f571-b2a3-43ba-8f4a-65d0a3eed74a","Type":"ContainerDied","Data":"c9da7326419acb23c844811323e67f423734a62690a3263e5e16ff1268ccc0c4"} Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.651866 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d949f571-b2a3-43ba-8f4a-65d0a3eed74a","Type":"ContainerDied","Data":"61d5311da9ff4bf9e3bd08ed3628a5a67dbab1eafcb0e9c7c54d1d48198e5420"} Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.651542 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.651939 4910 scope.go:117] "RemoveContainer" containerID="b5949571b11635d2d0c6aa2c54638023c2e8fa9a2a1bc6caa4e781c517195041" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.678066 4910 scope.go:117] "RemoveContainer" containerID="92a0da346a6c3e2e2491fb688bd2124b3883a1389df5d37d28a3a0825bd343cf" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.696695 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.704971 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.731453 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:39 crc kubenswrapper[4910]: E0105 22:12:39.731942 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="ceilometer-central-agent" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.734387 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="ceilometer-central-agent" Jan 05 22:12:39 crc kubenswrapper[4910]: E0105 22:12:39.734487 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="sg-core" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.734634 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="sg-core" Jan 05 22:12:39 crc kubenswrapper[4910]: E0105 22:12:39.734689 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="ceilometer-notification-agent" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.734748 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" 
containerName="ceilometer-notification-agent" Jan 05 22:12:39 crc kubenswrapper[4910]: E0105 22:12:39.734815 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="proxy-httpd" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.734902 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="proxy-httpd" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.735168 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="ceilometer-notification-agent" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.735260 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="proxy-httpd" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.735325 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="ceilometer-central-agent" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.735386 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" containerName="sg-core" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.736091 4910 scope.go:117] "RemoveContainer" containerID="c9da7326419acb23c844811323e67f423734a62690a3263e5e16ff1268ccc0c4" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.739144 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.744365 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.745068 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.748850 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.798023 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.807521 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.813705 4910 scope.go:117] "RemoveContainer" containerID="c60c8411f9c2399b8bae52ff71314c32f56a5a07355eed4bdebfb3ace1d5b675" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.884868 4910 scope.go:117] "RemoveContainer" containerID="b5949571b11635d2d0c6aa2c54638023c2e8fa9a2a1bc6caa4e781c517195041" Jan 05 22:12:39 crc kubenswrapper[4910]: E0105 22:12:39.885296 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5949571b11635d2d0c6aa2c54638023c2e8fa9a2a1bc6caa4e781c517195041\": container with ID starting with b5949571b11635d2d0c6aa2c54638023c2e8fa9a2a1bc6caa4e781c517195041 not found: ID does not exist" containerID="b5949571b11635d2d0c6aa2c54638023c2e8fa9a2a1bc6caa4e781c517195041" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.885336 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5949571b11635d2d0c6aa2c54638023c2e8fa9a2a1bc6caa4e781c517195041"} err="failed to get container status 
\"b5949571b11635d2d0c6aa2c54638023c2e8fa9a2a1bc6caa4e781c517195041\": rpc error: code = NotFound desc = could not find container \"b5949571b11635d2d0c6aa2c54638023c2e8fa9a2a1bc6caa4e781c517195041\": container with ID starting with b5949571b11635d2d0c6aa2c54638023c2e8fa9a2a1bc6caa4e781c517195041 not found: ID does not exist" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.885360 4910 scope.go:117] "RemoveContainer" containerID="92a0da346a6c3e2e2491fb688bd2124b3883a1389df5d37d28a3a0825bd343cf" Jan 05 22:12:39 crc kubenswrapper[4910]: E0105 22:12:39.885625 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92a0da346a6c3e2e2491fb688bd2124b3883a1389df5d37d28a3a0825bd343cf\": container with ID starting with 92a0da346a6c3e2e2491fb688bd2124b3883a1389df5d37d28a3a0825bd343cf not found: ID does not exist" containerID="92a0da346a6c3e2e2491fb688bd2124b3883a1389df5d37d28a3a0825bd343cf" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.885663 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92a0da346a6c3e2e2491fb688bd2124b3883a1389df5d37d28a3a0825bd343cf"} err="failed to get container status \"92a0da346a6c3e2e2491fb688bd2124b3883a1389df5d37d28a3a0825bd343cf\": rpc error: code = NotFound desc = could not find container \"92a0da346a6c3e2e2491fb688bd2124b3883a1389df5d37d28a3a0825bd343cf\": container with ID starting with 92a0da346a6c3e2e2491fb688bd2124b3883a1389df5d37d28a3a0825bd343cf not found: ID does not exist" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.885677 4910 scope.go:117] "RemoveContainer" containerID="c9da7326419acb23c844811323e67f423734a62690a3263e5e16ff1268ccc0c4" Jan 05 22:12:39 crc kubenswrapper[4910]: E0105 22:12:39.886046 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9da7326419acb23c844811323e67f423734a62690a3263e5e16ff1268ccc0c4\": container with ID starting with c9da7326419acb23c844811323e67f423734a62690a3263e5e16ff1268ccc0c4 not found: ID does not exist" containerID="c9da7326419acb23c844811323e67f423734a62690a3263e5e16ff1268ccc0c4" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.886072 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9da7326419acb23c844811323e67f423734a62690a3263e5e16ff1268ccc0c4"} err="failed to get container status \"c9da7326419acb23c844811323e67f423734a62690a3263e5e16ff1268ccc0c4\": rpc error: code = NotFound desc = could not find container \"c9da7326419acb23c844811323e67f423734a62690a3263e5e16ff1268ccc0c4\": container with ID starting with c9da7326419acb23c844811323e67f423734a62690a3263e5e16ff1268ccc0c4 not found: ID does not exist" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.886093 4910 scope.go:117] "RemoveContainer" containerID="c60c8411f9c2399b8bae52ff71314c32f56a5a07355eed4bdebfb3ace1d5b675" Jan 05 22:12:39 crc kubenswrapper[4910]: E0105 22:12:39.886373 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c60c8411f9c2399b8bae52ff71314c32f56a5a07355eed4bdebfb3ace1d5b675\": container with ID starting with c60c8411f9c2399b8bae52ff71314c32f56a5a07355eed4bdebfb3ace1d5b675 not found: ID does not exist" containerID="c60c8411f9c2399b8bae52ff71314c32f56a5a07355eed4bdebfb3ace1d5b675" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.886401 4910 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c60c8411f9c2399b8bae52ff71314c32f56a5a07355eed4bdebfb3ace1d5b675"} err="failed to get container status \"c60c8411f9c2399b8bae52ff71314c32f56a5a07355eed4bdebfb3ace1d5b675\": rpc error: code = NotFound desc = could not find container \"c60c8411f9c2399b8bae52ff71314c32f56a5a07355eed4bdebfb3ace1d5b675\": container with ID starting with c60c8411f9c2399b8bae52ff71314c32f56a5a07355eed4bdebfb3ace1d5b675 not found: ID does not exist" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.891869 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-log-httpd\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.892016 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.892064 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-scripts\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.892088 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-config-data\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.892406 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bh2l4\" (UniqueName: \"kubernetes.io/projected/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-kube-api-access-bh2l4\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.892432 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-run-httpd\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.893635 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.996187 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.996238 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-scripts\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.996271 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-config-data\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.996330 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bh2l4\" (UniqueName: \"kubernetes.io/projected/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-kube-api-access-bh2l4\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.996357 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-run-httpd\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.996468 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.996595 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-log-httpd\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.997073 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-log-httpd\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:39 crc kubenswrapper[4910]: I0105 22:12:39.997675 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-run-httpd\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:40 crc kubenswrapper[4910]: I0105 22:12:40.001795 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-scripts\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:40 crc kubenswrapper[4910]: I0105 22:12:40.003963 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:40 crc kubenswrapper[4910]: I0105 22:12:40.004227 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:40 crc kubenswrapper[4910]: I0105 22:12:40.004358 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-config-data\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:40 crc kubenswrapper[4910]: I0105 22:12:40.023743 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bh2l4\" (UniqueName: \"kubernetes.io/projected/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-kube-api-access-bh2l4\") pod \"ceilometer-0\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") " pod="openstack/ceilometer-0" Jan 05 22:12:40 crc kubenswrapper[4910]: I0105 22:12:40.116258 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:40 crc kubenswrapper[4910]: I0105 22:12:40.633212 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:40 crc kubenswrapper[4910]: W0105 22:12:40.645636 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27f276b1_32a3_4360_b3fd_a3e9ffecaf1f.slice/crio-1f343421927bc9ab89a40901de0aa4a05c6497f3cc53fadd1f2f212a3de1c9be WatchSource:0}: Error finding container 1f343421927bc9ab89a40901de0aa4a05c6497f3cc53fadd1f2f212a3de1c9be: Status 404 returned error can't find the container with id 1f343421927bc9ab89a40901de0aa4a05c6497f3cc53fadd1f2f212a3de1c9be Jan 05 22:12:40 crc kubenswrapper[4910]: I0105 22:12:40.661791 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f","Type":"ContainerStarted","Data":"1f343421927bc9ab89a40901de0aa4a05c6497f3cc53fadd1f2f212a3de1c9be"} Jan 05 22:12:40 crc kubenswrapper[4910]: I0105 22:12:40.735597 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d949f571-b2a3-43ba-8f4a-65d0a3eed74a" path="/var/lib/kubelet/pods/d949f571-b2a3-43ba-8f4a-65d0a3eed74a/volumes" Jan 05 22:12:40 crc kubenswrapper[4910]: I0105 22:12:40.977939 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="3a1e131f-00cf-4724-91e0-52d2766184d9" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.147:9292/healthcheck\": read tcp 10.217.0.2:53658->10.217.0.147:9292: read: connection reset by peer" Jan 05 22:12:40 crc kubenswrapper[4910]: I0105 22:12:40.978096 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="3a1e131f-00cf-4724-91e0-52d2766184d9" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.147:9292/healthcheck\": read tcp 10.217.0.2:53652->10.217.0.147:9292: read: connection reset by peer" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.386842 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.548657 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.644353 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-config-data\") pod \"3a1e131f-00cf-4724-91e0-52d2766184d9\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.644429 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-public-tls-certs\") pod \"3a1e131f-00cf-4724-91e0-52d2766184d9\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.644496 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-combined-ca-bundle\") pod \"3a1e131f-00cf-4724-91e0-52d2766184d9\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.644597 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a1e131f-00cf-4724-91e0-52d2766184d9-httpd-run\") pod \"3a1e131f-00cf-4724-91e0-52d2766184d9\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.644675 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-scripts\") pod \"3a1e131f-00cf-4724-91e0-52d2766184d9\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.644735 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a1e131f-00cf-4724-91e0-52d2766184d9-logs\") pod \"3a1e131f-00cf-4724-91e0-52d2766184d9\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.644758 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22jt6\" (UniqueName: \"kubernetes.io/projected/3a1e131f-00cf-4724-91e0-52d2766184d9-kube-api-access-22jt6\") pod \"3a1e131f-00cf-4724-91e0-52d2766184d9\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.644787 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"3a1e131f-00cf-4724-91e0-52d2766184d9\" (UID: \"3a1e131f-00cf-4724-91e0-52d2766184d9\") " Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.645426 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a1e131f-00cf-4724-91e0-52d2766184d9-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "3a1e131f-00cf-4724-91e0-52d2766184d9" (UID: "3a1e131f-00cf-4724-91e0-52d2766184d9"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.650471 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a1e131f-00cf-4724-91e0-52d2766184d9-logs" (OuterVolumeSpecName: "logs") pod "3a1e131f-00cf-4724-91e0-52d2766184d9" (UID: "3a1e131f-00cf-4724-91e0-52d2766184d9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.651620 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "3a1e131f-00cf-4724-91e0-52d2766184d9" (UID: "3a1e131f-00cf-4724-91e0-52d2766184d9"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.654277 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-scripts" (OuterVolumeSpecName: "scripts") pod "3a1e131f-00cf-4724-91e0-52d2766184d9" (UID: "3a1e131f-00cf-4724-91e0-52d2766184d9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.657748 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a1e131f-00cf-4724-91e0-52d2766184d9-kube-api-access-22jt6" (OuterVolumeSpecName: "kube-api-access-22jt6") pod "3a1e131f-00cf-4724-91e0-52d2766184d9" (UID: "3a1e131f-00cf-4724-91e0-52d2766184d9"). InnerVolumeSpecName "kube-api-access-22jt6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.677331 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f","Type":"ContainerStarted","Data":"57741184ba5a88618ca0d86e7f210658789e4e83c95701ae6da926f9f1848f81"} Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.678964 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3a1e131f-00cf-4724-91e0-52d2766184d9" (UID: "3a1e131f-00cf-4724-91e0-52d2766184d9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.679798 4910 generic.go:334] "Generic (PLEG): container finished" podID="3a1e131f-00cf-4724-91e0-52d2766184d9" containerID="4bb93c94da9335681c7dcfe1904cc909f089d4f34d01f8611101a36972f9245d" exitCode=0 Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.679853 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3a1e131f-00cf-4724-91e0-52d2766184d9","Type":"ContainerDied","Data":"4bb93c94da9335681c7dcfe1904cc909f089d4f34d01f8611101a36972f9245d"} Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.679891 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3a1e131f-00cf-4724-91e0-52d2766184d9","Type":"ContainerDied","Data":"da8afd18292c1ea76d28e6453baf0e350eca22f2d44109a0756e9815b4251d31"} Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.679910 4910 scope.go:117] "RemoveContainer" containerID="4bb93c94da9335681c7dcfe1904cc909f089d4f34d01f8611101a36972f9245d" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.680085 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.721223 4910 scope.go:117] "RemoveContainer" containerID="46d06fca16e6d7362ccaa51a4b5864275f1634fd78eaaef4118ec61bdc8a1f46" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.721699 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3a1e131f-00cf-4724-91e0-52d2766184d9" (UID: "3a1e131f-00cf-4724-91e0-52d2766184d9"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.729305 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-config-data" (OuterVolumeSpecName: "config-data") pod "3a1e131f-00cf-4724-91e0-52d2766184d9" (UID: "3a1e131f-00cf-4724-91e0-52d2766184d9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.746663 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.746713 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.746730 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.746905 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3a1e131f-00cf-4724-91e0-52d2766184d9-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.747726 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a1e131f-00cf-4724-91e0-52d2766184d9-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.747750 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a1e131f-00cf-4724-91e0-52d2766184d9-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.747765 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22jt6\" (UniqueName: \"kubernetes.io/projected/3a1e131f-00cf-4724-91e0-52d2766184d9-kube-api-access-22jt6\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.747796 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.752404 4910 scope.go:117] "RemoveContainer" containerID="4bb93c94da9335681c7dcfe1904cc909f089d4f34d01f8611101a36972f9245d" Jan 05 22:12:41 crc kubenswrapper[4910]: E0105 22:12:41.753255 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bb93c94da9335681c7dcfe1904cc909f089d4f34d01f8611101a36972f9245d\": container with ID starting with 4bb93c94da9335681c7dcfe1904cc909f089d4f34d01f8611101a36972f9245d not found: ID does not exist" containerID="4bb93c94da9335681c7dcfe1904cc909f089d4f34d01f8611101a36972f9245d" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.753347 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bb93c94da9335681c7dcfe1904cc909f089d4f34d01f8611101a36972f9245d"} err="failed to get container status \"4bb93c94da9335681c7dcfe1904cc909f089d4f34d01f8611101a36972f9245d\": rpc error: code = NotFound desc = could not find container \"4bb93c94da9335681c7dcfe1904cc909f089d4f34d01f8611101a36972f9245d\": container with ID starting with 4bb93c94da9335681c7dcfe1904cc909f089d4f34d01f8611101a36972f9245d not found: ID does not exist" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.753436 4910 scope.go:117] "RemoveContainer" 
containerID="46d06fca16e6d7362ccaa51a4b5864275f1634fd78eaaef4118ec61bdc8a1f46" Jan 05 22:12:41 crc kubenswrapper[4910]: E0105 22:12:41.753770 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46d06fca16e6d7362ccaa51a4b5864275f1634fd78eaaef4118ec61bdc8a1f46\": container with ID starting with 46d06fca16e6d7362ccaa51a4b5864275f1634fd78eaaef4118ec61bdc8a1f46 not found: ID does not exist" containerID="46d06fca16e6d7362ccaa51a4b5864275f1634fd78eaaef4118ec61bdc8a1f46" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.753850 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46d06fca16e6d7362ccaa51a4b5864275f1634fd78eaaef4118ec61bdc8a1f46"} err="failed to get container status \"46d06fca16e6d7362ccaa51a4b5864275f1634fd78eaaef4118ec61bdc8a1f46\": rpc error: code = NotFound desc = could not find container \"46d06fca16e6d7362ccaa51a4b5864275f1634fd78eaaef4118ec61bdc8a1f46\": container with ID starting with 46d06fca16e6d7362ccaa51a4b5864275f1634fd78eaaef4118ec61bdc8a1f46 not found: ID does not exist" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.776389 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Jan 05 22:12:41 crc kubenswrapper[4910]: I0105 22:12:41.858432 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.020932 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.034680 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.050393 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 22:12:42 crc kubenswrapper[4910]: E0105 22:12:42.050822 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a1e131f-00cf-4724-91e0-52d2766184d9" containerName="glance-httpd" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.050845 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a1e131f-00cf-4724-91e0-52d2766184d9" containerName="glance-httpd" Jan 05 22:12:42 crc kubenswrapper[4910]: E0105 22:12:42.050887 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a1e131f-00cf-4724-91e0-52d2766184d9" containerName="glance-log" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.050895 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a1e131f-00cf-4724-91e0-52d2766184d9" containerName="glance-log" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.051050 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a1e131f-00cf-4724-91e0-52d2766184d9" containerName="glance-log" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.051067 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a1e131f-00cf-4724-91e0-52d2766184d9" containerName="glance-httpd" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.052143 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.057136 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.057450 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.079934 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.166036 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f43d30e-14e4-4978-bb02-a251305f9330-logs\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.166203 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.166428 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f43d30e-14e4-4978-bb02-a251305f9330-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.166793 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.166849 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-scripts\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.166927 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jdb2\" (UniqueName: \"kubernetes.io/projected/8f43d30e-14e4-4978-bb02-a251305f9330-kube-api-access-9jdb2\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.167163 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.167215 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-config-data\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.269946 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.270081 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f43d30e-14e4-4978-bb02-a251305f9330-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.270211 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.270211 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.270245 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-scripts\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.270273 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jdb2\" (UniqueName: \"kubernetes.io/projected/8f43d30e-14e4-4978-bb02-a251305f9330-kube-api-access-9jdb2\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.270568 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.270603 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-config-data\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.270662 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/8f43d30e-14e4-4978-bb02-a251305f9330-logs\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.270982 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f43d30e-14e4-4978-bb02-a251305f9330-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.271178 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f43d30e-14e4-4978-bb02-a251305f9330-logs\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.276201 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.280051 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-scripts\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.280239 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.283340 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-config-data\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.293505 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jdb2\" (UniqueName: \"kubernetes.io/projected/8f43d30e-14e4-4978-bb02-a251305f9330-kube-api-access-9jdb2\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.398519 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.440386 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 22:12:42 crc kubenswrapper[4910]: I0105 22:12:42.756192 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a1e131f-00cf-4724-91e0-52d2766184d9" path="/var/lib/kubelet/pods/3a1e131f-00cf-4724-91e0-52d2766184d9/volumes" Jan 05 22:12:43 crc kubenswrapper[4910]: W0105 22:12:43.036412 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8f43d30e_14e4_4978_bb02_a251305f9330.slice/crio-10308827d807bf76a4c1cd30847c997b175f6aad1bb3dcca809249e90cf15140 WatchSource:0}: Error finding container 10308827d807bf76a4c1cd30847c997b175f6aad1bb3dcca809249e90cf15140: Status 404 returned error can't find the container with id 10308827d807bf76a4c1cd30847c997b175f6aad1bb3dcca809249e90cf15140 Jan 05 22:12:43 crc kubenswrapper[4910]: I0105 22:12:43.051530 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 22:12:43 crc kubenswrapper[4910]: I0105 22:12:43.710428 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f43d30e-14e4-4978-bb02-a251305f9330","Type":"ContainerStarted","Data":"108372c447325380382fbbd2e70aa8ef323e8b23d29f6e32887e63496ac39324"} Jan 05 22:12:43 crc kubenswrapper[4910]: I0105 22:12:43.710953 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f43d30e-14e4-4978-bb02-a251305f9330","Type":"ContainerStarted","Data":"10308827d807bf76a4c1cd30847c997b175f6aad1bb3dcca809249e90cf15140"} Jan 05 22:12:43 crc kubenswrapper[4910]: I0105 22:12:43.717479 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f","Type":"ContainerStarted","Data":"fac75f2de0171fc42bd047126a132f1e82c6101d0364e45e74e3a52498cd8f7c"} Jan 05 22:12:44 crc kubenswrapper[4910]: I0105 22:12:44.738873 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f43d30e-14e4-4978-bb02-a251305f9330","Type":"ContainerStarted","Data":"e302bde0bc25b21936e7ca65ca2849db5acaa0ddf0792ac1f5ffccee28c53746"} Jan 05 22:12:45 crc kubenswrapper[4910]: I0105 22:12:45.543414 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.543390537 podStartE2EDuration="3.543390537s" podCreationTimestamp="2026-01-05 22:12:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:12:44.784875203 +0000 UTC m=+1296.362372873" watchObservedRunningTime="2026-01-05 22:12:45.543390537 +0000 UTC m=+1297.120888207" Jan 05 22:12:45 crc kubenswrapper[4910]: I0105 22:12:45.546067 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:12:45 crc kubenswrapper[4910]: I0105 22:12:45.546393 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="d5e3749f-8afb-49a4-b1e0-a46951b4ddee" containerName="glance-log" containerID="cri-o://592ffa71e930006aebe23469038788c9c804dabb4a613441b6665be044bb977f" gracePeriod=30 Jan 05 22:12:45 crc kubenswrapper[4910]: I0105 22:12:45.546458 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" 
podUID="d5e3749f-8afb-49a4-b1e0-a46951b4ddee" containerName="glance-httpd" containerID="cri-o://948b179b1d2f802a2ca4c0b12b0d6fda3e792316c100f83b47f7519be94b41d6" gracePeriod=30 Jan 05 22:12:45 crc kubenswrapper[4910]: I0105 22:12:45.739510 4910 generic.go:334] "Generic (PLEG): container finished" podID="d5e3749f-8afb-49a4-b1e0-a46951b4ddee" containerID="592ffa71e930006aebe23469038788c9c804dabb4a613441b6665be044bb977f" exitCode=143 Jan 05 22:12:45 crc kubenswrapper[4910]: I0105 22:12:45.739581 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d5e3749f-8afb-49a4-b1e0-a46951b4ddee","Type":"ContainerDied","Data":"592ffa71e930006aebe23469038788c9c804dabb4a613441b6665be044bb977f"} Jan 05 22:12:45 crc kubenswrapper[4910]: I0105 22:12:45.741987 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f","Type":"ContainerStarted","Data":"d3ccba0baeada4c24ebb0e227d06a9e60a0770bb358dcb1735058232db65b1c4"} Jan 05 22:12:47 crc kubenswrapper[4910]: I0105 22:12:47.771578 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f","Type":"ContainerStarted","Data":"92d1320417e681ee89e72c5b6f21483ee13bef556b4e17a263fc20c55ec6448f"} Jan 05 22:12:47 crc kubenswrapper[4910]: I0105 22:12:47.772265 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 05 22:12:47 crc kubenswrapper[4910]: I0105 22:12:47.771831 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="proxy-httpd" containerID="cri-o://92d1320417e681ee89e72c5b6f21483ee13bef556b4e17a263fc20c55ec6448f" gracePeriod=30 Jan 05 22:12:47 crc kubenswrapper[4910]: I0105 22:12:47.771793 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="sg-core" containerID="cri-o://d3ccba0baeada4c24ebb0e227d06a9e60a0770bb358dcb1735058232db65b1c4" gracePeriod=30 Jan 05 22:12:47 crc kubenswrapper[4910]: I0105 22:12:47.771978 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="ceilometer-central-agent" containerID="cri-o://57741184ba5a88618ca0d86e7f210658789e4e83c95701ae6da926f9f1848f81" gracePeriod=30 Jan 05 22:12:47 crc kubenswrapper[4910]: I0105 22:12:47.771879 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="ceilometer-notification-agent" containerID="cri-o://fac75f2de0171fc42bd047126a132f1e82c6101d0364e45e74e3a52498cd8f7c" gracePeriod=30 Jan 05 22:12:47 crc kubenswrapper[4910]: I0105 22:12:47.803105 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.654622324 podStartE2EDuration="8.803084676s" podCreationTimestamp="2026-01-05 22:12:39 +0000 UTC" firstStartedPulling="2026-01-05 22:12:40.648288253 +0000 UTC m=+1292.225785923" lastFinishedPulling="2026-01-05 22:12:46.796750605 +0000 UTC m=+1298.374248275" observedRunningTime="2026-01-05 22:12:47.795434055 +0000 UTC m=+1299.372931725" watchObservedRunningTime="2026-01-05 22:12:47.803084676 +0000 UTC m=+1299.380582346" Jan 05 22:12:48 crc 
kubenswrapper[4910]: I0105 22:12:48.798166 4910 generic.go:334] "Generic (PLEG): container finished" podID="d5e3749f-8afb-49a4-b1e0-a46951b4ddee" containerID="948b179b1d2f802a2ca4c0b12b0d6fda3e792316c100f83b47f7519be94b41d6" exitCode=0 Jan 05 22:12:48 crc kubenswrapper[4910]: I0105 22:12:48.798244 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d5e3749f-8afb-49a4-b1e0-a46951b4ddee","Type":"ContainerDied","Data":"948b179b1d2f802a2ca4c0b12b0d6fda3e792316c100f83b47f7519be94b41d6"} Jan 05 22:12:48 crc kubenswrapper[4910]: I0105 22:12:48.811973 4910 generic.go:334] "Generic (PLEG): container finished" podID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerID="92d1320417e681ee89e72c5b6f21483ee13bef556b4e17a263fc20c55ec6448f" exitCode=0 Jan 05 22:12:48 crc kubenswrapper[4910]: I0105 22:12:48.812024 4910 generic.go:334] "Generic (PLEG): container finished" podID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerID="d3ccba0baeada4c24ebb0e227d06a9e60a0770bb358dcb1735058232db65b1c4" exitCode=2 Jan 05 22:12:48 crc kubenswrapper[4910]: I0105 22:12:48.812035 4910 generic.go:334] "Generic (PLEG): container finished" podID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerID="fac75f2de0171fc42bd047126a132f1e82c6101d0364e45e74e3a52498cd8f7c" exitCode=0 Jan 05 22:12:48 crc kubenswrapper[4910]: I0105 22:12:48.812061 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f","Type":"ContainerDied","Data":"92d1320417e681ee89e72c5b6f21483ee13bef556b4e17a263fc20c55ec6448f"} Jan 05 22:12:48 crc kubenswrapper[4910]: I0105 22:12:48.812096 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f","Type":"ContainerDied","Data":"d3ccba0baeada4c24ebb0e227d06a9e60a0770bb358dcb1735058232db65b1c4"} Jan 05 22:12:48 crc kubenswrapper[4910]: I0105 22:12:48.812108 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f","Type":"ContainerDied","Data":"fac75f2de0171fc42bd047126a132f1e82c6101d0364e45e74e3a52498cd8f7c"} Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.270246 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.434402 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-logs\") pod \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.434455 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-config-data\") pod \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.434514 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfrnt\" (UniqueName: \"kubernetes.io/projected/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-kube-api-access-cfrnt\") pod \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.434570 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-httpd-run\") pod \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.434602 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.434693 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-combined-ca-bundle\") pod \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.434723 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-internal-tls-certs\") pod \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.434773 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-scripts\") pod \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\" (UID: \"d5e3749f-8afb-49a4-b1e0-a46951b4ddee\") " Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.435068 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d5e3749f-8afb-49a4-b1e0-a46951b4ddee" (UID: "d5e3749f-8afb-49a4-b1e0-a46951b4ddee"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.435249 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-logs" (OuterVolumeSpecName: "logs") pod "d5e3749f-8afb-49a4-b1e0-a46951b4ddee" (UID: "d5e3749f-8afb-49a4-b1e0-a46951b4ddee"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.442529 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "d5e3749f-8afb-49a4-b1e0-a46951b4ddee" (UID: "d5e3749f-8afb-49a4-b1e0-a46951b4ddee"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.442537 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-scripts" (OuterVolumeSpecName: "scripts") pod "d5e3749f-8afb-49a4-b1e0-a46951b4ddee" (UID: "d5e3749f-8afb-49a4-b1e0-a46951b4ddee"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.442612 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-kube-api-access-cfrnt" (OuterVolumeSpecName: "kube-api-access-cfrnt") pod "d5e3749f-8afb-49a4-b1e0-a46951b4ddee" (UID: "d5e3749f-8afb-49a4-b1e0-a46951b4ddee"). InnerVolumeSpecName "kube-api-access-cfrnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.464596 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d5e3749f-8afb-49a4-b1e0-a46951b4ddee" (UID: "d5e3749f-8afb-49a4-b1e0-a46951b4ddee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.492401 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-config-data" (OuterVolumeSpecName: "config-data") pod "d5e3749f-8afb-49a4-b1e0-a46951b4ddee" (UID: "d5e3749f-8afb-49a4-b1e0-a46951b4ddee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.498433 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d5e3749f-8afb-49a4-b1e0-a46951b4ddee" (UID: "d5e3749f-8afb-49a4-b1e0-a46951b4ddee"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.537521 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.537571 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.537587 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfrnt\" (UniqueName: \"kubernetes.io/projected/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-kube-api-access-cfrnt\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.537603 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.537649 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.537664 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.537678 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.537690 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5e3749f-8afb-49a4-b1e0-a46951b4ddee-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.559257 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.639850 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.822986 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d5e3749f-8afb-49a4-b1e0-a46951b4ddee","Type":"ContainerDied","Data":"26139540e0d40c6ac9a618cca43f1558d5fb3abed6400dc6940b3d78b8054ceb"} Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.823050 4910 scope.go:117] "RemoveContainer" containerID="948b179b1d2f802a2ca4c0b12b0d6fda3e792316c100f83b47f7519be94b41d6" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.823068 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.852955 4910 scope.go:117] "RemoveContainer" containerID="592ffa71e930006aebe23469038788c9c804dabb4a613441b6665be044bb977f" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.863593 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.873270 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.892193 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:12:49 crc kubenswrapper[4910]: E0105 22:12:49.892592 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5e3749f-8afb-49a4-b1e0-a46951b4ddee" containerName="glance-log" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.892610 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5e3749f-8afb-49a4-b1e0-a46951b4ddee" containerName="glance-log" Jan 05 22:12:49 crc kubenswrapper[4910]: E0105 22:12:49.892641 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5e3749f-8afb-49a4-b1e0-a46951b4ddee" containerName="glance-httpd" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.892651 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5e3749f-8afb-49a4-b1e0-a46951b4ddee" containerName="glance-httpd" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.892845 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5e3749f-8afb-49a4-b1e0-a46951b4ddee" containerName="glance-httpd" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.892872 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5e3749f-8afb-49a4-b1e0-a46951b4ddee" containerName="glance-log" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.893820 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.895732 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.896275 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 05 22:12:49 crc kubenswrapper[4910]: I0105 22:12:49.925783 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.047052 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.047133 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70100901-0709-4900-ac75-462a85b350c3-logs\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.047161 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.047183 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.047207 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.047234 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/70100901-0709-4900-ac75-462a85b350c3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.047372 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lc6jc\" (UniqueName: \"kubernetes.io/projected/70100901-0709-4900-ac75-462a85b350c3-kube-api-access-lc6jc\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.047880 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.149684 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.149780 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.149831 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70100901-0709-4900-ac75-462a85b350c3-logs\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.149865 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.149894 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.149914 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.149940 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/70100901-0709-4900-ac75-462a85b350c3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.149963 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lc6jc\" (UniqueName: \"kubernetes.io/projected/70100901-0709-4900-ac75-462a85b350c3-kube-api-access-lc6jc\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.150829 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.153217 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/70100901-0709-4900-ac75-462a85b350c3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.159385 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70100901-0709-4900-ac75-462a85b350c3-logs\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.159675 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.161960 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.162370 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.162496 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.166406 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lc6jc\" (UniqueName: \"kubernetes.io/projected/70100901-0709-4900-ac75-462a85b350c3-kube-api-access-lc6jc\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.185759 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " pod="openstack/glance-default-internal-api-0" Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.246807 4910 util.go:30] "No sandbox for pod can be found. 
Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.739840 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5e3749f-8afb-49a4-b1e0-a46951b4ddee" path="/var/lib/kubelet/pods/d5e3749f-8afb-49a4-b1e0-a46951b4ddee/volumes"
Jan 05 22:12:50 crc kubenswrapper[4910]: W0105 22:12:50.935429 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod70100901_0709_4900_ac75_462a85b350c3.slice/crio-c242a4f524cd3c368b38d7d0d20bc78c77a89ca828ad4dca71070afad45d6804 WatchSource:0}: Error finding container c242a4f524cd3c368b38d7d0d20bc78c77a89ca828ad4dca71070afad45d6804: Status 404 returned error can't find the container with id c242a4f524cd3c368b38d7d0d20bc78c77a89ca828ad4dca71070afad45d6804
Jan 05 22:12:50 crc kubenswrapper[4910]: I0105 22:12:50.942505 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 05 22:12:51 crc kubenswrapper[4910]: I0105 22:12:51.853680 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"70100901-0709-4900-ac75-462a85b350c3","Type":"ContainerStarted","Data":"78b733a8056419d98b27c49e64b19c3144941beb236873f5de3f41a43f0fe70b"}
Jan 05 22:12:51 crc kubenswrapper[4910]: I0105 22:12:51.853745 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"70100901-0709-4900-ac75-462a85b350c3","Type":"ContainerStarted","Data":"c242a4f524cd3c368b38d7d0d20bc78c77a89ca828ad4dca71070afad45d6804"}
Jan 05 22:12:52 crc kubenswrapper[4910]: I0105 22:12:52.441434 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Jan 05 22:12:52 crc kubenswrapper[4910]: I0105 22:12:52.441487 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Jan 05 22:12:52 crc kubenswrapper[4910]: I0105 22:12:52.488233 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Jan 05 22:12:52 crc kubenswrapper[4910]: I0105 22:12:52.501654 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Jan 05 22:12:52 crc kubenswrapper[4910]: I0105 22:12:52.865987 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"70100901-0709-4900-ac75-462a85b350c3","Type":"ContainerStarted","Data":"1e10056784aaab7edb53371b1e8ee1b1dfc4d02346c220a12403d46024abfaa4"}
Jan 05 22:12:52 crc kubenswrapper[4910]: I0105 22:12:52.866556 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 05 22:12:52 crc kubenswrapper[4910]: I0105 22:12:52.866619 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 05 22:12:52 crc kubenswrapper[4910]: I0105 22:12:52.889339 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.889315517 podStartE2EDuration="3.889315517s" podCreationTimestamp="2026-01-05 22:12:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:12:52.882833864 +0000 UTC m=+1304.460331534" watchObservedRunningTime="2026-01-05 22:12:52.889315517 +0000 UTC m=+1304.466813187"
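
The pod_startup_latency_tracker record above reports zero-valued pull timestamps, and in that no-image-pull case podStartSLOduration collapses to the end-to-end interval from podCreationTimestamp to the watch-observed running time. A small Go check of the arithmetic, with the timestamps copied from the record (the RFC3339 rewriting of them is this sketch's assumption):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Values taken from the "Observed pod startup duration" record.
	created, err := time.Parse(time.RFC3339, "2026-01-05T22:12:49Z")
	if err != nil {
		panic(err)
	}
	watchObserved, err := time.Parse(time.RFC3339Nano, "2026-01-05T22:12:52.889315517Z")
	if err != nil {
		panic(err)
	}

	// With firstStartedPulling/lastFinishedPulling at the zero time,
	// the SLO duration equals the full creation-to-running interval.
	e2e := watchObserved.Sub(created)
	fmt.Printf("podStartSLOduration=%v podStartE2EDuration=%q\n", e2e.Seconds(), e2e)
}

This reproduces the logged 3.889315517s exactly, which is why the two fields carry the same value in this record.
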
Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.494098 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.552487 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-config-data\") pod \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") "
Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.552609 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-run-httpd\") pod \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") "
Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.552698 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-scripts\") pod \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") "
Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.552723 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-combined-ca-bundle\") pod \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") "
Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.552756 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-log-httpd\") pod \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") "
Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.552809 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bh2l4\" (UniqueName: \"kubernetes.io/projected/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-kube-api-access-bh2l4\") pod \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") "
Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.552857 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-sg-core-conf-yaml\") pod \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\" (UID: \"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f\") "
Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.553288 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" (UID: "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.553538 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" (UID: "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f"). InnerVolumeSpecName "log-httpd".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.601998 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-scripts" (OuterVolumeSpecName: "scripts") pod "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" (UID: "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.602033 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-kube-api-access-bh2l4" (OuterVolumeSpecName: "kube-api-access-bh2l4") pod "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" (UID: "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f"). InnerVolumeSpecName "kube-api-access-bh2l4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.606740 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" (UID: "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.655041 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.655072 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.655083 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.655093 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bh2l4\" (UniqueName: \"kubernetes.io/projected/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-kube-api-access-bh2l4\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.655103 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.681230 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" (UID: "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.707516 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-config-data" (OuterVolumeSpecName: "config-data") pod "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" (UID: "27f276b1-32a3-4360-b3fd-a3e9ffecaf1f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.757416 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.757465 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.885670 4910 generic.go:334] "Generic (PLEG): container finished" podID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerID="57741184ba5a88618ca0d86e7f210658789e4e83c95701ae6da926f9f1848f81" exitCode=0 Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.885712 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f","Type":"ContainerDied","Data":"57741184ba5a88618ca0d86e7f210658789e4e83c95701ae6da926f9f1848f81"} Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.885770 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"27f276b1-32a3-4360-b3fd-a3e9ffecaf1f","Type":"ContainerDied","Data":"1f343421927bc9ab89a40901de0aa4a05c6497f3cc53fadd1f2f212a3de1c9be"} Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.885800 4910 scope.go:117] "RemoveContainer" containerID="92d1320417e681ee89e72c5b6f21483ee13bef556b4e17a263fc20c55ec6448f" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.886080 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.914495 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.919105 4910 scope.go:117] "RemoveContainer" containerID="d3ccba0baeada4c24ebb0e227d06a9e60a0770bb358dcb1735058232db65b1c4" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.923073 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.938600 4910 scope.go:117] "RemoveContainer" containerID="fac75f2de0171fc42bd047126a132f1e82c6101d0364e45e74e3a52498cd8f7c" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.942830 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:54 crc kubenswrapper[4910]: E0105 22:12:54.943199 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="ceilometer-central-agent" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.943215 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="ceilometer-central-agent" Jan 05 22:12:54 crc kubenswrapper[4910]: E0105 22:12:54.943234 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="ceilometer-notification-agent" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.943240 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="ceilometer-notification-agent" Jan 05 22:12:54 crc kubenswrapper[4910]: E0105 22:12:54.943269 4910 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="sg-core" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.943276 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="sg-core" Jan 05 22:12:54 crc kubenswrapper[4910]: E0105 22:12:54.943287 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="proxy-httpd" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.943292 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="proxy-httpd" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.943467 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="ceilometer-notification-agent" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.943484 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="sg-core" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.943507 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="ceilometer-central-agent" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.943515 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" containerName="proxy-httpd" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.944995 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.947434 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.948726 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.962516 4910 scope.go:117] "RemoveContainer" containerID="57741184ba5a88618ca0d86e7f210658789e4e83c95701ae6da926f9f1848f81" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.963608 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.988415 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 05 22:12:54 crc kubenswrapper[4910]: I0105 22:12:54.988729 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.010468 4910 scope.go:117] "RemoveContainer" containerID="92d1320417e681ee89e72c5b6f21483ee13bef556b4e17a263fc20c55ec6448f" Jan 05 22:12:55 crc kubenswrapper[4910]: E0105 22:12:55.011060 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92d1320417e681ee89e72c5b6f21483ee13bef556b4e17a263fc20c55ec6448f\": container with ID starting with 92d1320417e681ee89e72c5b6f21483ee13bef556b4e17a263fc20c55ec6448f not found: ID does not exist" containerID="92d1320417e681ee89e72c5b6f21483ee13bef556b4e17a263fc20c55ec6448f" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.011237 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92d1320417e681ee89e72c5b6f21483ee13bef556b4e17a263fc20c55ec6448f"} err="failed to get container status 
\"92d1320417e681ee89e72c5b6f21483ee13bef556b4e17a263fc20c55ec6448f\": rpc error: code = NotFound desc = could not find container \"92d1320417e681ee89e72c5b6f21483ee13bef556b4e17a263fc20c55ec6448f\": container with ID starting with 92d1320417e681ee89e72c5b6f21483ee13bef556b4e17a263fc20c55ec6448f not found: ID does not exist" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.011372 4910 scope.go:117] "RemoveContainer" containerID="d3ccba0baeada4c24ebb0e227d06a9e60a0770bb358dcb1735058232db65b1c4" Jan 05 22:12:55 crc kubenswrapper[4910]: E0105 22:12:55.012172 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3ccba0baeada4c24ebb0e227d06a9e60a0770bb358dcb1735058232db65b1c4\": container with ID starting with d3ccba0baeada4c24ebb0e227d06a9e60a0770bb358dcb1735058232db65b1c4 not found: ID does not exist" containerID="d3ccba0baeada4c24ebb0e227d06a9e60a0770bb358dcb1735058232db65b1c4" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.012257 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3ccba0baeada4c24ebb0e227d06a9e60a0770bb358dcb1735058232db65b1c4"} err="failed to get container status \"d3ccba0baeada4c24ebb0e227d06a9e60a0770bb358dcb1735058232db65b1c4\": rpc error: code = NotFound desc = could not find container \"d3ccba0baeada4c24ebb0e227d06a9e60a0770bb358dcb1735058232db65b1c4\": container with ID starting with d3ccba0baeada4c24ebb0e227d06a9e60a0770bb358dcb1735058232db65b1c4 not found: ID does not exist" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.012321 4910 scope.go:117] "RemoveContainer" containerID="fac75f2de0171fc42bd047126a132f1e82c6101d0364e45e74e3a52498cd8f7c" Jan 05 22:12:55 crc kubenswrapper[4910]: E0105 22:12:55.013659 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fac75f2de0171fc42bd047126a132f1e82c6101d0364e45e74e3a52498cd8f7c\": container with ID starting with fac75f2de0171fc42bd047126a132f1e82c6101d0364e45e74e3a52498cd8f7c not found: ID does not exist" containerID="fac75f2de0171fc42bd047126a132f1e82c6101d0364e45e74e3a52498cd8f7c" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.013690 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fac75f2de0171fc42bd047126a132f1e82c6101d0364e45e74e3a52498cd8f7c"} err="failed to get container status \"fac75f2de0171fc42bd047126a132f1e82c6101d0364e45e74e3a52498cd8f7c\": rpc error: code = NotFound desc = could not find container \"fac75f2de0171fc42bd047126a132f1e82c6101d0364e45e74e3a52498cd8f7c\": container with ID starting with fac75f2de0171fc42bd047126a132f1e82c6101d0364e45e74e3a52498cd8f7c not found: ID does not exist" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.013709 4910 scope.go:117] "RemoveContainer" containerID="57741184ba5a88618ca0d86e7f210658789e4e83c95701ae6da926f9f1848f81" Jan 05 22:12:55 crc kubenswrapper[4910]: E0105 22:12:55.016410 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57741184ba5a88618ca0d86e7f210658789e4e83c95701ae6da926f9f1848f81\": container with ID starting with 57741184ba5a88618ca0d86e7f210658789e4e83c95701ae6da926f9f1848f81 not found: ID does not exist" containerID="57741184ba5a88618ca0d86e7f210658789e4e83c95701ae6da926f9f1848f81" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.016464 4910 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57741184ba5a88618ca0d86e7f210658789e4e83c95701ae6da926f9f1848f81"} err="failed to get container status \"57741184ba5a88618ca0d86e7f210658789e4e83c95701ae6da926f9f1848f81\": rpc error: code = NotFound desc = could not find container \"57741184ba5a88618ca0d86e7f210658789e4e83c95701ae6da926f9f1848f81\": container with ID starting with 57741184ba5a88618ca0d86e7f210658789e4e83c95701ae6da926f9f1848f81 not found: ID does not exist" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.048411 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:55 crc kubenswrapper[4910]: E0105 22:12:55.049052 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data kube-api-access-9ndgr log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[combined-ca-bundle config-data kube-api-access-9ndgr log-httpd run-httpd scripts sg-core-conf-yaml]: context canceled" pod="openstack/ceilometer-0" podUID="dde039db-3dc0-4755-890d-b9ef83b91335" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.061506 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ndgr\" (UniqueName: \"kubernetes.io/projected/dde039db-3dc0-4755-890d-b9ef83b91335-kube-api-access-9ndgr\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.061562 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-config-data\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.061597 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-scripts\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.061627 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dde039db-3dc0-4755-890d-b9ef83b91335-run-httpd\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.061732 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.061862 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.062029 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/dde039db-3dc0-4755-890d-b9ef83b91335-log-httpd\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.090737 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.164434 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ndgr\" (UniqueName: \"kubernetes.io/projected/dde039db-3dc0-4755-890d-b9ef83b91335-kube-api-access-9ndgr\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.164497 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-config-data\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.164540 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-scripts\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.164576 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dde039db-3dc0-4755-890d-b9ef83b91335-run-httpd\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.164622 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.164661 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.164708 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dde039db-3dc0-4755-890d-b9ef83b91335-log-httpd\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.165327 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dde039db-3dc0-4755-890d-b9ef83b91335-log-httpd\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.169448 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dde039db-3dc0-4755-890d-b9ef83b91335-run-httpd\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc 
kubenswrapper[4910]: I0105 22:12:55.170015 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-scripts\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.170021 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.171532 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.173183 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-config-data\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.193849 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ndgr\" (UniqueName: \"kubernetes.io/projected/dde039db-3dc0-4755-890d-b9ef83b91335-kube-api-access-9ndgr\") pod \"ceilometer-0\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.894702 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.908787 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.980360 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dde039db-3dc0-4755-890d-b9ef83b91335-log-httpd\") pod \"dde039db-3dc0-4755-890d-b9ef83b91335\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.980465 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ndgr\" (UniqueName: \"kubernetes.io/projected/dde039db-3dc0-4755-890d-b9ef83b91335-kube-api-access-9ndgr\") pod \"dde039db-3dc0-4755-890d-b9ef83b91335\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.980498 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-combined-ca-bundle\") pod \"dde039db-3dc0-4755-890d-b9ef83b91335\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.980525 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-scripts\") pod \"dde039db-3dc0-4755-890d-b9ef83b91335\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.980555 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-config-data\") pod \"dde039db-3dc0-4755-890d-b9ef83b91335\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.980753 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dde039db-3dc0-4755-890d-b9ef83b91335-run-httpd\") pod \"dde039db-3dc0-4755-890d-b9ef83b91335\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.980779 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-sg-core-conf-yaml\") pod \"dde039db-3dc0-4755-890d-b9ef83b91335\" (UID: \"dde039db-3dc0-4755-890d-b9ef83b91335\") " Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.982372 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dde039db-3dc0-4755-890d-b9ef83b91335-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "dde039db-3dc0-4755-890d-b9ef83b91335" (UID: "dde039db-3dc0-4755-890d-b9ef83b91335"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.985973 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dde039db-3dc0-4755-890d-b9ef83b91335-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "dde039db-3dc0-4755-890d-b9ef83b91335" (UID: "dde039db-3dc0-4755-890d-b9ef83b91335"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.988187 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-config-data" (OuterVolumeSpecName: "config-data") pod "dde039db-3dc0-4755-890d-b9ef83b91335" (UID: "dde039db-3dc0-4755-890d-b9ef83b91335"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.988406 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dde039db-3dc0-4755-890d-b9ef83b91335-kube-api-access-9ndgr" (OuterVolumeSpecName: "kube-api-access-9ndgr") pod "dde039db-3dc0-4755-890d-b9ef83b91335" (UID: "dde039db-3dc0-4755-890d-b9ef83b91335"). InnerVolumeSpecName "kube-api-access-9ndgr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.992348 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "dde039db-3dc0-4755-890d-b9ef83b91335" (UID: "dde039db-3dc0-4755-890d-b9ef83b91335"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:55 crc kubenswrapper[4910]: I0105 22:12:55.992623 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dde039db-3dc0-4755-890d-b9ef83b91335" (UID: "dde039db-3dc0-4755-890d-b9ef83b91335"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.002689 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-scripts" (OuterVolumeSpecName: "scripts") pod "dde039db-3dc0-4755-890d-b9ef83b91335" (UID: "dde039db-3dc0-4755-890d-b9ef83b91335"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.082578 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dde039db-3dc0-4755-890d-b9ef83b91335-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.082837 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.082900 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dde039db-3dc0-4755-890d-b9ef83b91335-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.082986 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ndgr\" (UniqueName: \"kubernetes.io/projected/dde039db-3dc0-4755-890d-b9ef83b91335-kube-api-access-9ndgr\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.083048 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.083106 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.083201 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dde039db-3dc0-4755-890d-b9ef83b91335-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.300970 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-5vttc"] Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.302133 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5vttc" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.387249 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-5vttc"] Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.387824 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/607e486d-f70e-413c-8568-db15e01a3377-operator-scripts\") pod \"nova-api-db-create-5vttc\" (UID: \"607e486d-f70e-413c-8568-db15e01a3377\") " pod="openstack/nova-api-db-create-5vttc" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.387945 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8d7n\" (UniqueName: \"kubernetes.io/projected/607e486d-f70e-413c-8568-db15e01a3377-kube-api-access-g8d7n\") pod \"nova-api-db-create-5vttc\" (UID: \"607e486d-f70e-413c-8568-db15e01a3377\") " pod="openstack/nova-api-db-create-5vttc" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.444310 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-mpds2"] Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.445557 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-mpds2" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.479006 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-mpds2"] Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.489293 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28c4p\" (UniqueName: \"kubernetes.io/projected/77d19d69-6202-4594-8b11-e02ff86dc8f6-kube-api-access-28c4p\") pod \"nova-cell0-db-create-mpds2\" (UID: \"77d19d69-6202-4594-8b11-e02ff86dc8f6\") " pod="openstack/nova-cell0-db-create-mpds2" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.489357 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8d7n\" (UniqueName: \"kubernetes.io/projected/607e486d-f70e-413c-8568-db15e01a3377-kube-api-access-g8d7n\") pod \"nova-api-db-create-5vttc\" (UID: \"607e486d-f70e-413c-8568-db15e01a3377\") " pod="openstack/nova-api-db-create-5vttc" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.489394 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/77d19d69-6202-4594-8b11-e02ff86dc8f6-operator-scripts\") pod \"nova-cell0-db-create-mpds2\" (UID: \"77d19d69-6202-4594-8b11-e02ff86dc8f6\") " pod="openstack/nova-cell0-db-create-mpds2" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.489467 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/607e486d-f70e-413c-8568-db15e01a3377-operator-scripts\") pod \"nova-api-db-create-5vttc\" (UID: \"607e486d-f70e-413c-8568-db15e01a3377\") " pod="openstack/nova-api-db-create-5vttc" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.490255 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/607e486d-f70e-413c-8568-db15e01a3377-operator-scripts\") pod \"nova-api-db-create-5vttc\" (UID: \"607e486d-f70e-413c-8568-db15e01a3377\") " pod="openstack/nova-api-db-create-5vttc" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.514723 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8d7n\" (UniqueName: \"kubernetes.io/projected/607e486d-f70e-413c-8568-db15e01a3377-kube-api-access-g8d7n\") pod \"nova-api-db-create-5vttc\" (UID: \"607e486d-f70e-413c-8568-db15e01a3377\") " pod="openstack/nova-api-db-create-5vttc" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.547775 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-b0f9-account-create-update-jn9k7"] Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.548941 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-b0f9-account-create-update-jn9k7" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.556535 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.573075 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-b0f9-account-create-update-jn9k7"] Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.594351 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8900433-ac66-443f-8d83-72fefd413abd-operator-scripts\") pod \"nova-api-b0f9-account-create-update-jn9k7\" (UID: \"f8900433-ac66-443f-8d83-72fefd413abd\") " pod="openstack/nova-api-b0f9-account-create-update-jn9k7" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.594424 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28c4p\" (UniqueName: \"kubernetes.io/projected/77d19d69-6202-4594-8b11-e02ff86dc8f6-kube-api-access-28c4p\") pod \"nova-cell0-db-create-mpds2\" (UID: \"77d19d69-6202-4594-8b11-e02ff86dc8f6\") " pod="openstack/nova-cell0-db-create-mpds2" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.594469 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/77d19d69-6202-4594-8b11-e02ff86dc8f6-operator-scripts\") pod \"nova-cell0-db-create-mpds2\" (UID: \"77d19d69-6202-4594-8b11-e02ff86dc8f6\") " pod="openstack/nova-cell0-db-create-mpds2" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.594509 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xrn8\" (UniqueName: \"kubernetes.io/projected/f8900433-ac66-443f-8d83-72fefd413abd-kube-api-access-5xrn8\") pod \"nova-api-b0f9-account-create-update-jn9k7\" (UID: \"f8900433-ac66-443f-8d83-72fefd413abd\") " pod="openstack/nova-api-b0f9-account-create-update-jn9k7" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.595546 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/77d19d69-6202-4594-8b11-e02ff86dc8f6-operator-scripts\") pod \"nova-cell0-db-create-mpds2\" (UID: \"77d19d69-6202-4594-8b11-e02ff86dc8f6\") " pod="openstack/nova-cell0-db-create-mpds2" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.620749 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5vttc" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.639199 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-bjzk5"] Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.641224 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-bjzk5" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.642933 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28c4p\" (UniqueName: \"kubernetes.io/projected/77d19d69-6202-4594-8b11-e02ff86dc8f6-kube-api-access-28c4p\") pod \"nova-cell0-db-create-mpds2\" (UID: \"77d19d69-6202-4594-8b11-e02ff86dc8f6\") " pod="openstack/nova-cell0-db-create-mpds2" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.652801 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-bjzk5"] Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.698279 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a90f601c-a3b6-496b-9f50-2ecde1cb123b-operator-scripts\") pod \"nova-cell1-db-create-bjzk5\" (UID: \"a90f601c-a3b6-496b-9f50-2ecde1cb123b\") " pod="openstack/nova-cell1-db-create-bjzk5" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.698387 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8900433-ac66-443f-8d83-72fefd413abd-operator-scripts\") pod \"nova-api-b0f9-account-create-update-jn9k7\" (UID: \"f8900433-ac66-443f-8d83-72fefd413abd\") " pod="openstack/nova-api-b0f9-account-create-update-jn9k7" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.698486 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pxvw\" (UniqueName: \"kubernetes.io/projected/a90f601c-a3b6-496b-9f50-2ecde1cb123b-kube-api-access-6pxvw\") pod \"nova-cell1-db-create-bjzk5\" (UID: \"a90f601c-a3b6-496b-9f50-2ecde1cb123b\") " pod="openstack/nova-cell1-db-create-bjzk5" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.698553 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xrn8\" (UniqueName: \"kubernetes.io/projected/f8900433-ac66-443f-8d83-72fefd413abd-kube-api-access-5xrn8\") pod \"nova-api-b0f9-account-create-update-jn9k7\" (UID: \"f8900433-ac66-443f-8d83-72fefd413abd\") " pod="openstack/nova-api-b0f9-account-create-update-jn9k7" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.699567 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8900433-ac66-443f-8d83-72fefd413abd-operator-scripts\") pod \"nova-api-b0f9-account-create-update-jn9k7\" (UID: \"f8900433-ac66-443f-8d83-72fefd413abd\") " pod="openstack/nova-api-b0f9-account-create-update-jn9k7" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.739895 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xrn8\" (UniqueName: \"kubernetes.io/projected/f8900433-ac66-443f-8d83-72fefd413abd-kube-api-access-5xrn8\") pod \"nova-api-b0f9-account-create-update-jn9k7\" (UID: \"f8900433-ac66-443f-8d83-72fefd413abd\") " pod="openstack/nova-api-b0f9-account-create-update-jn9k7" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.749011 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27f276b1-32a3-4360-b3fd-a3e9ffecaf1f" path="/var/lib/kubelet/pods/27f276b1-32a3-4360-b3fd-a3e9ffecaf1f/volumes" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.767833 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-mpds2" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.801651 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a90f601c-a3b6-496b-9f50-2ecde1cb123b-operator-scripts\") pod \"nova-cell1-db-create-bjzk5\" (UID: \"a90f601c-a3b6-496b-9f50-2ecde1cb123b\") " pod="openstack/nova-cell1-db-create-bjzk5" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.801799 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pxvw\" (UniqueName: \"kubernetes.io/projected/a90f601c-a3b6-496b-9f50-2ecde1cb123b-kube-api-access-6pxvw\") pod \"nova-cell1-db-create-bjzk5\" (UID: \"a90f601c-a3b6-496b-9f50-2ecde1cb123b\") " pod="openstack/nova-cell1-db-create-bjzk5" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.807234 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a90f601c-a3b6-496b-9f50-2ecde1cb123b-operator-scripts\") pod \"nova-cell1-db-create-bjzk5\" (UID: \"a90f601c-a3b6-496b-9f50-2ecde1cb123b\") " pod="openstack/nova-cell1-db-create-bjzk5" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.834855 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pxvw\" (UniqueName: \"kubernetes.io/projected/a90f601c-a3b6-496b-9f50-2ecde1cb123b-kube-api-access-6pxvw\") pod \"nova-cell1-db-create-bjzk5\" (UID: \"a90f601c-a3b6-496b-9f50-2ecde1cb123b\") " pod="openstack/nova-cell1-db-create-bjzk5" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.837367 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-941b-account-create-update-rj8nc"] Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.839043 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-941b-account-create-update-rj8nc" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.844224 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.849890 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-941b-account-create-update-rj8nc"] Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.874875 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b0f9-account-create-update-jn9k7" Jan 05 22:12:56 crc kubenswrapper[4910]: I0105 22:12:56.913488 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.000066 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.014111 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fd0274e-5312-4b6e-be52-03e243ac4e6b-operator-scripts\") pod \"nova-cell0-941b-account-create-update-rj8nc\" (UID: \"2fd0274e-5312-4b6e-be52-03e243ac4e6b\") " pod="openstack/nova-cell0-941b-account-create-update-rj8nc" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.014203 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxxhg\" (UniqueName: \"kubernetes.io/projected/2fd0274e-5312-4b6e-be52-03e243ac4e6b-kube-api-access-qxxhg\") pod \"nova-cell0-941b-account-create-update-rj8nc\" (UID: \"2fd0274e-5312-4b6e-be52-03e243ac4e6b\") " pod="openstack/nova-cell0-941b-account-create-update-rj8nc" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.038034 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.062670 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.069654 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.084034 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.085904 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.085952 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.088794 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-bjzk5" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.100947 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-398d-account-create-update-7q6nt"] Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.102675 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-398d-account-create-update-7q6nt" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.109141 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.118110 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fd0274e-5312-4b6e-be52-03e243ac4e6b-operator-scripts\") pod \"nova-cell0-941b-account-create-update-rj8nc\" (UID: \"2fd0274e-5312-4b6e-be52-03e243ac4e6b\") " pod="openstack/nova-cell0-941b-account-create-update-rj8nc" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.118197 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxxhg\" (UniqueName: \"kubernetes.io/projected/2fd0274e-5312-4b6e-be52-03e243ac4e6b-kube-api-access-qxxhg\") pod \"nova-cell0-941b-account-create-update-rj8nc\" (UID: \"2fd0274e-5312-4b6e-be52-03e243ac4e6b\") " pod="openstack/nova-cell0-941b-account-create-update-rj8nc" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.118975 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-398d-account-create-update-7q6nt"] Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.122144 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fd0274e-5312-4b6e-be52-03e243ac4e6b-operator-scripts\") pod \"nova-cell0-941b-account-create-update-rj8nc\" (UID: \"2fd0274e-5312-4b6e-be52-03e243ac4e6b\") " pod="openstack/nova-cell0-941b-account-create-update-rj8nc" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.149577 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxxhg\" (UniqueName: \"kubernetes.io/projected/2fd0274e-5312-4b6e-be52-03e243ac4e6b-kube-api-access-qxxhg\") pod \"nova-cell0-941b-account-create-update-rj8nc\" (UID: \"2fd0274e-5312-4b6e-be52-03e243ac4e6b\") " pod="openstack/nova-cell0-941b-account-create-update-rj8nc" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.206288 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-941b-account-create-update-rj8nc" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.220570 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8797e759-783d-406e-b7e4-2f184019d3a4-log-httpd\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.220634 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-config-data\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.220654 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.220700 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-scripts\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.220765 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.220799 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5fdj\" (UniqueName: \"kubernetes.io/projected/8797e759-783d-406e-b7e4-2f184019d3a4-kube-api-access-w5fdj\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.220834 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8797e759-783d-406e-b7e4-2f184019d3a4-run-httpd\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.220855 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8892663e-d012-478a-99ea-8cff1f7c9b35-operator-scripts\") pod \"nova-cell1-398d-account-create-update-7q6nt\" (UID: \"8892663e-d012-478a-99ea-8cff1f7c9b35\") " pod="openstack/nova-cell1-398d-account-create-update-7q6nt" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.220890 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nhth\" (UniqueName: \"kubernetes.io/projected/8892663e-d012-478a-99ea-8cff1f7c9b35-kube-api-access-9nhth\") pod \"nova-cell1-398d-account-create-update-7q6nt\" (UID: \"8892663e-d012-478a-99ea-8cff1f7c9b35\") " 
pod="openstack/nova-cell1-398d-account-create-update-7q6nt" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.285450 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-5vttc"] Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.323049 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-scripts\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.323166 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.323202 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5fdj\" (UniqueName: \"kubernetes.io/projected/8797e759-783d-406e-b7e4-2f184019d3a4-kube-api-access-w5fdj\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.323248 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8797e759-783d-406e-b7e4-2f184019d3a4-run-httpd\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.323273 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8892663e-d012-478a-99ea-8cff1f7c9b35-operator-scripts\") pod \"nova-cell1-398d-account-create-update-7q6nt\" (UID: \"8892663e-d012-478a-99ea-8cff1f7c9b35\") " pod="openstack/nova-cell1-398d-account-create-update-7q6nt" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.323319 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nhth\" (UniqueName: \"kubernetes.io/projected/8892663e-d012-478a-99ea-8cff1f7c9b35-kube-api-access-9nhth\") pod \"nova-cell1-398d-account-create-update-7q6nt\" (UID: \"8892663e-d012-478a-99ea-8cff1f7c9b35\") " pod="openstack/nova-cell1-398d-account-create-update-7q6nt" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.323364 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8797e759-783d-406e-b7e4-2f184019d3a4-log-httpd\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.323408 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-config-data\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.323434 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 
05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.324846 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8797e759-783d-406e-b7e4-2f184019d3a4-log-httpd\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.325030 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8892663e-d012-478a-99ea-8cff1f7c9b35-operator-scripts\") pod \"nova-cell1-398d-account-create-update-7q6nt\" (UID: \"8892663e-d012-478a-99ea-8cff1f7c9b35\") " pod="openstack/nova-cell1-398d-account-create-update-7q6nt" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.325245 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8797e759-783d-406e-b7e4-2f184019d3a4-run-httpd\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.348639 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-scripts\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.351254 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5fdj\" (UniqueName: \"kubernetes.io/projected/8797e759-783d-406e-b7e4-2f184019d3a4-kube-api-access-w5fdj\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.353531 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nhth\" (UniqueName: \"kubernetes.io/projected/8892663e-d012-478a-99ea-8cff1f7c9b35-kube-api-access-9nhth\") pod \"nova-cell1-398d-account-create-update-7q6nt\" (UID: \"8892663e-d012-478a-99ea-8cff1f7c9b35\") " pod="openstack/nova-cell1-398d-account-create-update-7q6nt" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.356548 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.363188 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-config-data\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.376154 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.399340 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.427700 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-398d-account-create-update-7q6nt" Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.521707 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-b0f9-account-create-update-jn9k7"] Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.540835 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-mpds2"] Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.770656 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-bjzk5"] Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.914627 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-941b-account-create-update-rj8nc"] Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.925041 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-bjzk5" event={"ID":"a90f601c-a3b6-496b-9f50-2ecde1cb123b","Type":"ContainerStarted","Data":"71ad398ee89bd2a23e317cd73f3e873a8e505e533fa145bd606e9dd616ba8720"} Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.927887 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b0f9-account-create-update-jn9k7" event={"ID":"f8900433-ac66-443f-8d83-72fefd413abd","Type":"ContainerStarted","Data":"0c6b7be5b69dabfc5939f0acd361eb47d35c3dbe3c94f99724aa620d3a6a1f63"} Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.932070 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mpds2" event={"ID":"77d19d69-6202-4594-8b11-e02ff86dc8f6","Type":"ContainerStarted","Data":"8aa476ba0b2f085b967453998e6dff5bf6c8832ee9b2a97098c52da6c0b46ca1"} Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.939841 4910 generic.go:334] "Generic (PLEG): container finished" podID="607e486d-f70e-413c-8568-db15e01a3377" containerID="2c488eda0d2dfa5be7f48895c6bc7e782d4dd9655987f5f376f5c2c6127ec222" exitCode=0 Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.939885 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5vttc" event={"ID":"607e486d-f70e-413c-8568-db15e01a3377","Type":"ContainerDied","Data":"2c488eda0d2dfa5be7f48895c6bc7e782d4dd9655987f5f376f5c2c6127ec222"} Jan 05 22:12:57 crc kubenswrapper[4910]: I0105 22:12:57.939921 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5vttc" event={"ID":"607e486d-f70e-413c-8568-db15e01a3377","Type":"ContainerStarted","Data":"18f42bfa08ce6f40200003f7b955b68d0d1086dde1debb470909ef7fb62ecccb"} Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.084723 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:12:58 crc kubenswrapper[4910]: W0105 22:12:58.085503 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8797e759_783d_406e_b7e4_2f184019d3a4.slice/crio-423b250cb3286215590a96890151b078d2d38fd6808f33367c464804e9fa9733 WatchSource:0}: Error finding container 423b250cb3286215590a96890151b078d2d38fd6808f33367c464804e9fa9733: Status 404 returned error can't find the container with id 423b250cb3286215590a96890151b078d2d38fd6808f33367c464804e9fa9733 Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.091288 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.178544 4910 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/nova-cell1-398d-account-create-update-7q6nt"] Jan 05 22:12:58 crc kubenswrapper[4910]: W0105 22:12:58.198292 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8892663e_d012_478a_99ea_8cff1f7c9b35.slice/crio-7574849ad35b54d1a4736f11bebfcb0adf6351bc46e8b20dfa1df25193f552a1 WatchSource:0}: Error finding container 7574849ad35b54d1a4736f11bebfcb0adf6351bc46e8b20dfa1df25193f552a1: Status 404 returned error can't find the container with id 7574849ad35b54d1a4736f11bebfcb0adf6351bc46e8b20dfa1df25193f552a1 Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.738316 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dde039db-3dc0-4755-890d-b9ef83b91335" path="/var/lib/kubelet/pods/dde039db-3dc0-4755-890d-b9ef83b91335/volumes" Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.965080 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-398d-account-create-update-7q6nt" event={"ID":"8892663e-d012-478a-99ea-8cff1f7c9b35","Type":"ContainerStarted","Data":"cae8bd0cb87895a687cbd5f20d318700417d702fab40faf85596436cd24929d1"} Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.965594 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-398d-account-create-update-7q6nt" event={"ID":"8892663e-d012-478a-99ea-8cff1f7c9b35","Type":"ContainerStarted","Data":"7574849ad35b54d1a4736f11bebfcb0adf6351bc46e8b20dfa1df25193f552a1"} Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.969561 4910 generic.go:334] "Generic (PLEG): container finished" podID="a90f601c-a3b6-496b-9f50-2ecde1cb123b" containerID="145b76ee96ce39fface07ddb7e7426e99956691dcfb26b5588c0c4adce94f7d5" exitCode=0 Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.969670 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-bjzk5" event={"ID":"a90f601c-a3b6-496b-9f50-2ecde1cb123b","Type":"ContainerDied","Data":"145b76ee96ce39fface07ddb7e7426e99956691dcfb26b5588c0c4adce94f7d5"} Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.976840 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8797e759-783d-406e-b7e4-2f184019d3a4","Type":"ContainerStarted","Data":"423b250cb3286215590a96890151b078d2d38fd6808f33367c464804e9fa9733"} Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.977132 4910 generic.go:334] "Generic (PLEG): container finished" podID="f8900433-ac66-443f-8d83-72fefd413abd" containerID="e76acc70d2f3c0357d76eabf5265baa098e43cc6676d8f72632539c98f35b12e" exitCode=0 Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.977338 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b0f9-account-create-update-jn9k7" event={"ID":"f8900433-ac66-443f-8d83-72fefd413abd","Type":"ContainerDied","Data":"e76acc70d2f3c0357d76eabf5265baa098e43cc6676d8f72632539c98f35b12e"} Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.979716 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-941b-account-create-update-rj8nc" event={"ID":"2fd0274e-5312-4b6e-be52-03e243ac4e6b","Type":"ContainerStarted","Data":"1352af66477e9c4c651d53fee72ab2422bf9224b47dd0d348acdbc01b60ee9ae"} Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.979749 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-941b-account-create-update-rj8nc" 
event={"ID":"2fd0274e-5312-4b6e-be52-03e243ac4e6b","Type":"ContainerStarted","Data":"03673ad2e1d1ebd41342fa277e176e19e047320c080b9498f5f695d2ca6cdb37"} Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.980941 4910 generic.go:334] "Generic (PLEG): container finished" podID="77d19d69-6202-4594-8b11-e02ff86dc8f6" containerID="50328ae4a92fd73ed7c1a91c0b9f3e496e9daedafb9457a9a7502901363c23bf" exitCode=0 Jan 05 22:12:58 crc kubenswrapper[4910]: I0105 22:12:58.981214 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mpds2" event={"ID":"77d19d69-6202-4594-8b11-e02ff86dc8f6","Type":"ContainerDied","Data":"50328ae4a92fd73ed7c1a91c0b9f3e496e9daedafb9457a9a7502901363c23bf"} Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.005588 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-398d-account-create-update-7q6nt" podStartSLOduration=3.005568473 podStartE2EDuration="3.005568473s" podCreationTimestamp="2026-01-05 22:12:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:12:58.999223584 +0000 UTC m=+1310.576721254" watchObservedRunningTime="2026-01-05 22:12:59.005568473 +0000 UTC m=+1310.583066143" Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.132250 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-941b-account-create-update-rj8nc" podStartSLOduration=3.132226425 podStartE2EDuration="3.132226425s" podCreationTimestamp="2026-01-05 22:12:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:12:59.126790198 +0000 UTC m=+1310.704287868" watchObservedRunningTime="2026-01-05 22:12:59.132226425 +0000 UTC m=+1310.709724095" Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.679755 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5vttc" Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.686038 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/607e486d-f70e-413c-8568-db15e01a3377-operator-scripts\") pod \"607e486d-f70e-413c-8568-db15e01a3377\" (UID: \"607e486d-f70e-413c-8568-db15e01a3377\") " Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.686095 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8d7n\" (UniqueName: \"kubernetes.io/projected/607e486d-f70e-413c-8568-db15e01a3377-kube-api-access-g8d7n\") pod \"607e486d-f70e-413c-8568-db15e01a3377\" (UID: \"607e486d-f70e-413c-8568-db15e01a3377\") " Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.686736 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/607e486d-f70e-413c-8568-db15e01a3377-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "607e486d-f70e-413c-8568-db15e01a3377" (UID: "607e486d-f70e-413c-8568-db15e01a3377"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.686900 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/607e486d-f70e-413c-8568-db15e01a3377-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.690849 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/607e486d-f70e-413c-8568-db15e01a3377-kube-api-access-g8d7n" (OuterVolumeSpecName: "kube-api-access-g8d7n") pod "607e486d-f70e-413c-8568-db15e01a3377" (UID: "607e486d-f70e-413c-8568-db15e01a3377"). InnerVolumeSpecName "kube-api-access-g8d7n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.789043 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8d7n\" (UniqueName: \"kubernetes.io/projected/607e486d-f70e-413c-8568-db15e01a3377-kube-api-access-g8d7n\") on node \"crc\" DevicePath \"\"" Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.992777 4910 generic.go:334] "Generic (PLEG): container finished" podID="2fd0274e-5312-4b6e-be52-03e243ac4e6b" containerID="1352af66477e9c4c651d53fee72ab2422bf9224b47dd0d348acdbc01b60ee9ae" exitCode=0 Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.992877 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-941b-account-create-update-rj8nc" event={"ID":"2fd0274e-5312-4b6e-be52-03e243ac4e6b","Type":"ContainerDied","Data":"1352af66477e9c4c651d53fee72ab2422bf9224b47dd0d348acdbc01b60ee9ae"} Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.994474 4910 generic.go:334] "Generic (PLEG): container finished" podID="8892663e-d012-478a-99ea-8cff1f7c9b35" containerID="cae8bd0cb87895a687cbd5f20d318700417d702fab40faf85596436cd24929d1" exitCode=0 Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.994551 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-398d-account-create-update-7q6nt" event={"ID":"8892663e-d012-478a-99ea-8cff1f7c9b35","Type":"ContainerDied","Data":"cae8bd0cb87895a687cbd5f20d318700417d702fab40faf85596436cd24929d1"} Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.996643 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5vttc" event={"ID":"607e486d-f70e-413c-8568-db15e01a3377","Type":"ContainerDied","Data":"18f42bfa08ce6f40200003f7b955b68d0d1086dde1debb470909ef7fb62ecccb"} Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.996680 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="18f42bfa08ce6f40200003f7b955b68d0d1086dde1debb470909ef7fb62ecccb" Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.996653 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-5vttc" Jan 05 22:12:59 crc kubenswrapper[4910]: I0105 22:12:59.998771 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8797e759-783d-406e-b7e4-2f184019d3a4","Type":"ContainerStarted","Data":"8b2aaeeda5db7456c91ccc8d6a469af50cff132146c8f0688cf6672f7d4e7a1d"} Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.247669 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.254368 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.327156 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.344864 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.610400 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b0f9-account-create-update-jn9k7" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.619140 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-bjzk5" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.658671 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-mpds2" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.715586 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28c4p\" (UniqueName: \"kubernetes.io/projected/77d19d69-6202-4594-8b11-e02ff86dc8f6-kube-api-access-28c4p\") pod \"77d19d69-6202-4594-8b11-e02ff86dc8f6\" (UID: \"77d19d69-6202-4594-8b11-e02ff86dc8f6\") " Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.715721 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pxvw\" (UniqueName: \"kubernetes.io/projected/a90f601c-a3b6-496b-9f50-2ecde1cb123b-kube-api-access-6pxvw\") pod \"a90f601c-a3b6-496b-9f50-2ecde1cb123b\" (UID: \"a90f601c-a3b6-496b-9f50-2ecde1cb123b\") " Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.715783 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/77d19d69-6202-4594-8b11-e02ff86dc8f6-operator-scripts\") pod \"77d19d69-6202-4594-8b11-e02ff86dc8f6\" (UID: \"77d19d69-6202-4594-8b11-e02ff86dc8f6\") " Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.715893 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8900433-ac66-443f-8d83-72fefd413abd-operator-scripts\") pod \"f8900433-ac66-443f-8d83-72fefd413abd\" (UID: \"f8900433-ac66-443f-8d83-72fefd413abd\") " Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.716050 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a90f601c-a3b6-496b-9f50-2ecde1cb123b-operator-scripts\") pod \"a90f601c-a3b6-496b-9f50-2ecde1cb123b\" (UID: \"a90f601c-a3b6-496b-9f50-2ecde1cb123b\") " Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.716079 
4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xrn8\" (UniqueName: \"kubernetes.io/projected/f8900433-ac66-443f-8d83-72fefd413abd-kube-api-access-5xrn8\") pod \"f8900433-ac66-443f-8d83-72fefd413abd\" (UID: \"f8900433-ac66-443f-8d83-72fefd413abd\") " Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.717173 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77d19d69-6202-4594-8b11-e02ff86dc8f6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "77d19d69-6202-4594-8b11-e02ff86dc8f6" (UID: "77d19d69-6202-4594-8b11-e02ff86dc8f6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.717896 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8900433-ac66-443f-8d83-72fefd413abd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f8900433-ac66-443f-8d83-72fefd413abd" (UID: "f8900433-ac66-443f-8d83-72fefd413abd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.718255 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a90f601c-a3b6-496b-9f50-2ecde1cb123b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a90f601c-a3b6-496b-9f50-2ecde1cb123b" (UID: "a90f601c-a3b6-496b-9f50-2ecde1cb123b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.726765 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8900433-ac66-443f-8d83-72fefd413abd-kube-api-access-5xrn8" (OuterVolumeSpecName: "kube-api-access-5xrn8") pod "f8900433-ac66-443f-8d83-72fefd413abd" (UID: "f8900433-ac66-443f-8d83-72fefd413abd"). InnerVolumeSpecName "kube-api-access-5xrn8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.727349 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a90f601c-a3b6-496b-9f50-2ecde1cb123b-kube-api-access-6pxvw" (OuterVolumeSpecName: "kube-api-access-6pxvw") pod "a90f601c-a3b6-496b-9f50-2ecde1cb123b" (UID: "a90f601c-a3b6-496b-9f50-2ecde1cb123b"). InnerVolumeSpecName "kube-api-access-6pxvw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.728787 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77d19d69-6202-4594-8b11-e02ff86dc8f6-kube-api-access-28c4p" (OuterVolumeSpecName: "kube-api-access-28c4p") pod "77d19d69-6202-4594-8b11-e02ff86dc8f6" (UID: "77d19d69-6202-4594-8b11-e02ff86dc8f6"). InnerVolumeSpecName "kube-api-access-28c4p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.817811 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28c4p\" (UniqueName: \"kubernetes.io/projected/77d19d69-6202-4594-8b11-e02ff86dc8f6-kube-api-access-28c4p\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.817844 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6pxvw\" (UniqueName: \"kubernetes.io/projected/a90f601c-a3b6-496b-9f50-2ecde1cb123b-kube-api-access-6pxvw\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.817854 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/77d19d69-6202-4594-8b11-e02ff86dc8f6-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.817863 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8900433-ac66-443f-8d83-72fefd413abd-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.817871 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a90f601c-a3b6-496b-9f50-2ecde1cb123b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:00 crc kubenswrapper[4910]: I0105 22:13:00.817880 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xrn8\" (UniqueName: \"kubernetes.io/projected/f8900433-ac66-443f-8d83-72fefd413abd-kube-api-access-5xrn8\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.011139 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-mpds2" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.011140 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mpds2" event={"ID":"77d19d69-6202-4594-8b11-e02ff86dc8f6","Type":"ContainerDied","Data":"8aa476ba0b2f085b967453998e6dff5bf6c8832ee9b2a97098c52da6c0b46ca1"} Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.011283 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8aa476ba0b2f085b967453998e6dff5bf6c8832ee9b2a97098c52da6c0b46ca1" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.013061 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-bjzk5" event={"ID":"a90f601c-a3b6-496b-9f50-2ecde1cb123b","Type":"ContainerDied","Data":"71ad398ee89bd2a23e317cd73f3e873a8e505e533fa145bd606e9dd616ba8720"} Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.013389 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="71ad398ee89bd2a23e317cd73f3e873a8e505e533fa145bd606e9dd616ba8720" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.013080 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-bjzk5" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.016288 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8797e759-783d-406e-b7e4-2f184019d3a4","Type":"ContainerStarted","Data":"5cd62cd7ae636deadd27d3542cc98f43a8068e6a8412baf73498319181725686"} Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.019309 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b0f9-account-create-update-jn9k7" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.019377 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b0f9-account-create-update-jn9k7" event={"ID":"f8900433-ac66-443f-8d83-72fefd413abd","Type":"ContainerDied","Data":"0c6b7be5b69dabfc5939f0acd361eb47d35c3dbe3c94f99724aa620d3a6a1f63"} Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.019431 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c6b7be5b69dabfc5939f0acd361eb47d35c3dbe3c94f99724aa620d3a6a1f63" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.020020 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.020040 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.504997 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-941b-account-create-update-rj8nc" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.519453 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-398d-account-create-update-7q6nt" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.639659 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8892663e-d012-478a-99ea-8cff1f7c9b35-operator-scripts\") pod \"8892663e-d012-478a-99ea-8cff1f7c9b35\" (UID: \"8892663e-d012-478a-99ea-8cff1f7c9b35\") " Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.639750 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fd0274e-5312-4b6e-be52-03e243ac4e6b-operator-scripts\") pod \"2fd0274e-5312-4b6e-be52-03e243ac4e6b\" (UID: \"2fd0274e-5312-4b6e-be52-03e243ac4e6b\") " Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.639871 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9nhth\" (UniqueName: \"kubernetes.io/projected/8892663e-d012-478a-99ea-8cff1f7c9b35-kube-api-access-9nhth\") pod \"8892663e-d012-478a-99ea-8cff1f7c9b35\" (UID: \"8892663e-d012-478a-99ea-8cff1f7c9b35\") " Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.639916 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxxhg\" (UniqueName: \"kubernetes.io/projected/2fd0274e-5312-4b6e-be52-03e243ac4e6b-kube-api-access-qxxhg\") pod \"2fd0274e-5312-4b6e-be52-03e243ac4e6b\" (UID: \"2fd0274e-5312-4b6e-be52-03e243ac4e6b\") " Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.640309 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8892663e-d012-478a-99ea-8cff1f7c9b35-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8892663e-d012-478a-99ea-8cff1f7c9b35" (UID: "8892663e-d012-478a-99ea-8cff1f7c9b35"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.640543 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8892663e-d012-478a-99ea-8cff1f7c9b35-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.640666 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fd0274e-5312-4b6e-be52-03e243ac4e6b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2fd0274e-5312-4b6e-be52-03e243ac4e6b" (UID: "2fd0274e-5312-4b6e-be52-03e243ac4e6b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.646013 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fd0274e-5312-4b6e-be52-03e243ac4e6b-kube-api-access-qxxhg" (OuterVolumeSpecName: "kube-api-access-qxxhg") pod "2fd0274e-5312-4b6e-be52-03e243ac4e6b" (UID: "2fd0274e-5312-4b6e-be52-03e243ac4e6b"). InnerVolumeSpecName "kube-api-access-qxxhg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.647392 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8892663e-d012-478a-99ea-8cff1f7c9b35-kube-api-access-9nhth" (OuterVolumeSpecName: "kube-api-access-9nhth") pod "8892663e-d012-478a-99ea-8cff1f7c9b35" (UID: "8892663e-d012-478a-99ea-8cff1f7c9b35"). 
InnerVolumeSpecName "kube-api-access-9nhth". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.742961 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fd0274e-5312-4b6e-be52-03e243ac4e6b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.742999 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9nhth\" (UniqueName: \"kubernetes.io/projected/8892663e-d012-478a-99ea-8cff1f7c9b35-kube-api-access-9nhth\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:01 crc kubenswrapper[4910]: I0105 22:13:01.743014 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxxhg\" (UniqueName: \"kubernetes.io/projected/2fd0274e-5312-4b6e-be52-03e243ac4e6b-kube-api-access-qxxhg\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:02 crc kubenswrapper[4910]: I0105 22:13:02.030703 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-941b-account-create-update-rj8nc" Jan 05 22:13:02 crc kubenswrapper[4910]: I0105 22:13:02.030718 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-941b-account-create-update-rj8nc" event={"ID":"2fd0274e-5312-4b6e-be52-03e243ac4e6b","Type":"ContainerDied","Data":"03673ad2e1d1ebd41342fa277e176e19e047320c080b9498f5f695d2ca6cdb37"} Jan 05 22:13:02 crc kubenswrapper[4910]: I0105 22:13:02.030914 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03673ad2e1d1ebd41342fa277e176e19e047320c080b9498f5f695d2ca6cdb37" Jan 05 22:13:02 crc kubenswrapper[4910]: I0105 22:13:02.033759 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-398d-account-create-update-7q6nt" event={"ID":"8892663e-d012-478a-99ea-8cff1f7c9b35","Type":"ContainerDied","Data":"7574849ad35b54d1a4736f11bebfcb0adf6351bc46e8b20dfa1df25193f552a1"} Jan 05 22:13:02 crc kubenswrapper[4910]: I0105 22:13:02.033782 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-398d-account-create-update-7q6nt" Jan 05 22:13:02 crc kubenswrapper[4910]: I0105 22:13:02.033793 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7574849ad35b54d1a4736f11bebfcb0adf6351bc46e8b20dfa1df25193f552a1" Jan 05 22:13:02 crc kubenswrapper[4910]: I0105 22:13:02.036726 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8797e759-783d-406e-b7e4-2f184019d3a4","Type":"ContainerStarted","Data":"37e564ca57594d4deb4b8bf6773d294d5ba687af7ebd0d6145e576355e4b91f6"} Jan 05 22:13:03 crc kubenswrapper[4910]: I0105 22:13:03.181716 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 05 22:13:03 crc kubenswrapper[4910]: I0105 22:13:03.182171 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 05 22:13:03 crc kubenswrapper[4910]: I0105 22:13:03.272615 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 05 22:13:04 crc kubenswrapper[4910]: I0105 22:13:04.059249 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8797e759-783d-406e-b7e4-2f184019d3a4","Type":"ContainerStarted","Data":"0b75308431292941e9df942bb1fb78860ad7ff465742aaf9bb4f6edec7b26925"} Jan 05 22:13:04 crc kubenswrapper[4910]: I0105 22:13:04.094590 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.112601333 podStartE2EDuration="8.094570192s" podCreationTimestamp="2026-01-05 22:12:56 +0000 UTC" firstStartedPulling="2026-01-05 22:12:58.091049751 +0000 UTC m=+1309.668547421" lastFinishedPulling="2026-01-05 22:13:03.07301861 +0000 UTC m=+1314.650516280" observedRunningTime="2026-01-05 22:13:04.088038449 +0000 UTC m=+1315.665536119" watchObservedRunningTime="2026-01-05 22:13:04.094570192 +0000 UTC m=+1315.672067862" Jan 05 22:13:05 crc kubenswrapper[4910]: I0105 22:13:05.069370 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.340256 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2r4mt"] Jan 05 22:13:07 crc kubenswrapper[4910]: E0105 22:13:07.341016 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="607e486d-f70e-413c-8568-db15e01a3377" containerName="mariadb-database-create" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.341031 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="607e486d-f70e-413c-8568-db15e01a3377" containerName="mariadb-database-create" Jan 05 22:13:07 crc kubenswrapper[4910]: E0105 22:13:07.341054 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8900433-ac66-443f-8d83-72fefd413abd" containerName="mariadb-account-create-update" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.341061 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8900433-ac66-443f-8d83-72fefd413abd" containerName="mariadb-account-create-update" Jan 05 22:13:07 crc kubenswrapper[4910]: E0105 22:13:07.341075 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd0274e-5312-4b6e-be52-03e243ac4e6b" containerName="mariadb-account-create-update" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.341083 4910 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="2fd0274e-5312-4b6e-be52-03e243ac4e6b" containerName="mariadb-account-create-update" Jan 05 22:13:07 crc kubenswrapper[4910]: E0105 22:13:07.341097 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77d19d69-6202-4594-8b11-e02ff86dc8f6" containerName="mariadb-database-create" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.341103 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="77d19d69-6202-4594-8b11-e02ff86dc8f6" containerName="mariadb-database-create" Jan 05 22:13:07 crc kubenswrapper[4910]: E0105 22:13:07.341111 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a90f601c-a3b6-496b-9f50-2ecde1cb123b" containerName="mariadb-database-create" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.341135 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a90f601c-a3b6-496b-9f50-2ecde1cb123b" containerName="mariadb-database-create" Jan 05 22:13:07 crc kubenswrapper[4910]: E0105 22:13:07.341150 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8892663e-d012-478a-99ea-8cff1f7c9b35" containerName="mariadb-account-create-update" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.341156 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8892663e-d012-478a-99ea-8cff1f7c9b35" containerName="mariadb-account-create-update" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.341336 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8900433-ac66-443f-8d83-72fefd413abd" containerName="mariadb-account-create-update" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.341351 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="607e486d-f70e-413c-8568-db15e01a3377" containerName="mariadb-database-create" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.341358 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="77d19d69-6202-4594-8b11-e02ff86dc8f6" containerName="mariadb-database-create" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.341368 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8892663e-d012-478a-99ea-8cff1f7c9b35" containerName="mariadb-account-create-update" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.341379 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a90f601c-a3b6-496b-9f50-2ecde1cb123b" containerName="mariadb-database-create" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.341393 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd0274e-5312-4b6e-be52-03e243ac4e6b" containerName="mariadb-account-create-update" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.342032 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.345989 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-gdk5k" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.346000 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.346246 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.350174 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2r4mt"] Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.477479 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-2r4mt\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.477615 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-scripts\") pod \"nova-cell0-conductor-db-sync-2r4mt\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.477646 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcs7g\" (UniqueName: \"kubernetes.io/projected/656781fb-f17c-4ea5-b35c-38d7639eb605-kube-api-access-lcs7g\") pod \"nova-cell0-conductor-db-sync-2r4mt\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.477686 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-config-data\") pod \"nova-cell0-conductor-db-sync-2r4mt\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.579562 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-scripts\") pod \"nova-cell0-conductor-db-sync-2r4mt\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.579626 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcs7g\" (UniqueName: \"kubernetes.io/projected/656781fb-f17c-4ea5-b35c-38d7639eb605-kube-api-access-lcs7g\") pod \"nova-cell0-conductor-db-sync-2r4mt\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.579700 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-config-data\") pod \"nova-cell0-conductor-db-sync-2r4mt\" (UID: 
\"656781fb-f17c-4ea5-b35c-38d7639eb605\") " pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.579743 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-2r4mt\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.586968 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-scripts\") pod \"nova-cell0-conductor-db-sync-2r4mt\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.587088 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-2r4mt\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.590978 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-config-data\") pod \"nova-cell0-conductor-db-sync-2r4mt\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.610643 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcs7g\" (UniqueName: \"kubernetes.io/projected/656781fb-f17c-4ea5-b35c-38d7639eb605-kube-api-access-lcs7g\") pod \"nova-cell0-conductor-db-sync-2r4mt\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:07 crc kubenswrapper[4910]: I0105 22:13:07.663018 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:08 crc kubenswrapper[4910]: I0105 22:13:08.203825 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2r4mt"] Jan 05 22:13:08 crc kubenswrapper[4910]: W0105 22:13:08.216392 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod656781fb_f17c_4ea5_b35c_38d7639eb605.slice/crio-a4de41de8b36b01aacea262e73fb87fc965d35850c5116ba775ac5c861a6f93e WatchSource:0}: Error finding container a4de41de8b36b01aacea262e73fb87fc965d35850c5116ba775ac5c861a6f93e: Status 404 returned error can't find the container with id a4de41de8b36b01aacea262e73fb87fc965d35850c5116ba775ac5c861a6f93e Jan 05 22:13:09 crc kubenswrapper[4910]: I0105 22:13:09.103030 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-2r4mt" event={"ID":"656781fb-f17c-4ea5-b35c-38d7639eb605","Type":"ContainerStarted","Data":"a4de41de8b36b01aacea262e73fb87fc965d35850c5116ba775ac5c861a6f93e"} Jan 05 22:13:15 crc kubenswrapper[4910]: I0105 22:13:15.166053 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-2r4mt" event={"ID":"656781fb-f17c-4ea5-b35c-38d7639eb605","Type":"ContainerStarted","Data":"b7619a936c7a930eb76cafc76fa049a86a194b4efb240e66cd9d1ddb9c037037"} Jan 05 22:13:15 crc kubenswrapper[4910]: I0105 22:13:15.190845 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-2r4mt" podStartSLOduration=1.828727777 podStartE2EDuration="8.190823979s" podCreationTimestamp="2026-01-05 22:13:07 +0000 UTC" firstStartedPulling="2026-01-05 22:13:08.223000798 +0000 UTC m=+1319.800498468" lastFinishedPulling="2026-01-05 22:13:14.585097 +0000 UTC m=+1326.162594670" observedRunningTime="2026-01-05 22:13:15.182837789 +0000 UTC m=+1326.760335479" watchObservedRunningTime="2026-01-05 22:13:15.190823979 +0000 UTC m=+1326.768321649" Jan 05 22:13:24 crc kubenswrapper[4910]: I0105 22:13:24.275285 4910 generic.go:334] "Generic (PLEG): container finished" podID="656781fb-f17c-4ea5-b35c-38d7639eb605" containerID="b7619a936c7a930eb76cafc76fa049a86a194b4efb240e66cd9d1ddb9c037037" exitCode=0 Jan 05 22:13:24 crc kubenswrapper[4910]: I0105 22:13:24.275361 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-2r4mt" event={"ID":"656781fb-f17c-4ea5-b35c-38d7639eb605","Type":"ContainerDied","Data":"b7619a936c7a930eb76cafc76fa049a86a194b4efb240e66cd9d1ddb9c037037"} Jan 05 22:13:25 crc kubenswrapper[4910]: I0105 22:13:25.618701 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:25 crc kubenswrapper[4910]: I0105 22:13:25.744166 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-config-data\") pod \"656781fb-f17c-4ea5-b35c-38d7639eb605\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " Jan 05 22:13:25 crc kubenswrapper[4910]: I0105 22:13:25.744310 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lcs7g\" (UniqueName: \"kubernetes.io/projected/656781fb-f17c-4ea5-b35c-38d7639eb605-kube-api-access-lcs7g\") pod \"656781fb-f17c-4ea5-b35c-38d7639eb605\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " Jan 05 22:13:25 crc kubenswrapper[4910]: I0105 22:13:25.744359 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-combined-ca-bundle\") pod \"656781fb-f17c-4ea5-b35c-38d7639eb605\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " Jan 05 22:13:25 crc kubenswrapper[4910]: I0105 22:13:25.744464 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-scripts\") pod \"656781fb-f17c-4ea5-b35c-38d7639eb605\" (UID: \"656781fb-f17c-4ea5-b35c-38d7639eb605\") " Jan 05 22:13:25 crc kubenswrapper[4910]: I0105 22:13:25.750311 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/656781fb-f17c-4ea5-b35c-38d7639eb605-kube-api-access-lcs7g" (OuterVolumeSpecName: "kube-api-access-lcs7g") pod "656781fb-f17c-4ea5-b35c-38d7639eb605" (UID: "656781fb-f17c-4ea5-b35c-38d7639eb605"). InnerVolumeSpecName "kube-api-access-lcs7g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:25 crc kubenswrapper[4910]: I0105 22:13:25.756421 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-scripts" (OuterVolumeSpecName: "scripts") pod "656781fb-f17c-4ea5-b35c-38d7639eb605" (UID: "656781fb-f17c-4ea5-b35c-38d7639eb605"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:25 crc kubenswrapper[4910]: I0105 22:13:25.771302 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-config-data" (OuterVolumeSpecName: "config-data") pod "656781fb-f17c-4ea5-b35c-38d7639eb605" (UID: "656781fb-f17c-4ea5-b35c-38d7639eb605"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:25 crc kubenswrapper[4910]: I0105 22:13:25.777448 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "656781fb-f17c-4ea5-b35c-38d7639eb605" (UID: "656781fb-f17c-4ea5-b35c-38d7639eb605"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:25 crc kubenswrapper[4910]: I0105 22:13:25.846393 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lcs7g\" (UniqueName: \"kubernetes.io/projected/656781fb-f17c-4ea5-b35c-38d7639eb605-kube-api-access-lcs7g\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:25 crc kubenswrapper[4910]: I0105 22:13:25.846450 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:25 crc kubenswrapper[4910]: I0105 22:13:25.846481 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:25 crc kubenswrapper[4910]: I0105 22:13:25.846493 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/656781fb-f17c-4ea5-b35c-38d7639eb605-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.299827 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-2r4mt" event={"ID":"656781fb-f17c-4ea5-b35c-38d7639eb605","Type":"ContainerDied","Data":"a4de41de8b36b01aacea262e73fb87fc965d35850c5116ba775ac5c861a6f93e"} Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.300219 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4de41de8b36b01aacea262e73fb87fc965d35850c5116ba775ac5c861a6f93e" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.299880 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-2r4mt" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.410551 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 22:13:26 crc kubenswrapper[4910]: E0105 22:13:26.411307 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="656781fb-f17c-4ea5-b35c-38d7639eb605" containerName="nova-cell0-conductor-db-sync" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.411401 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="656781fb-f17c-4ea5-b35c-38d7639eb605" containerName="nova-cell0-conductor-db-sync" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.411712 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="656781fb-f17c-4ea5-b35c-38d7639eb605" containerName="nova-cell0-conductor-db-sync" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.412606 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.414910 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-gdk5k" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.416020 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.425260 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.561183 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8aafdf-9b35-4c41-8726-6c7e86edee5f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\") " pod="openstack/nova-cell0-conductor-0" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.561317 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de8aafdf-9b35-4c41-8726-6c7e86edee5f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\") " pod="openstack/nova-cell0-conductor-0" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.561654 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tttg6\" (UniqueName: \"kubernetes.io/projected/de8aafdf-9b35-4c41-8726-6c7e86edee5f-kube-api-access-tttg6\") pod \"nova-cell0-conductor-0\" (UID: \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\") " pod="openstack/nova-cell0-conductor-0" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.664259 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de8aafdf-9b35-4c41-8726-6c7e86edee5f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\") " pod="openstack/nova-cell0-conductor-0" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.664352 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tttg6\" (UniqueName: \"kubernetes.io/projected/de8aafdf-9b35-4c41-8726-6c7e86edee5f-kube-api-access-tttg6\") pod \"nova-cell0-conductor-0\" (UID: \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\") " pod="openstack/nova-cell0-conductor-0" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.664462 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8aafdf-9b35-4c41-8726-6c7e86edee5f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\") " pod="openstack/nova-cell0-conductor-0" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.670000 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8aafdf-9b35-4c41-8726-6c7e86edee5f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\") " pod="openstack/nova-cell0-conductor-0" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.670186 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de8aafdf-9b35-4c41-8726-6c7e86edee5f-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\") " pod="openstack/nova-cell0-conductor-0" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.684924 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tttg6\" (UniqueName: \"kubernetes.io/projected/de8aafdf-9b35-4c41-8726-6c7e86edee5f-kube-api-access-tttg6\") pod \"nova-cell0-conductor-0\" (UID: \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\") " pod="openstack/nova-cell0-conductor-0" Jan 05 22:13:26 crc kubenswrapper[4910]: I0105 22:13:26.730643 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 05 22:13:27 crc kubenswrapper[4910]: I0105 22:13:27.170509 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 22:13:27 crc kubenswrapper[4910]: W0105 22:13:27.176326 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde8aafdf_9b35_4c41_8726_6c7e86edee5f.slice/crio-4a96f0146a60e825d8c8659118268e18b77004d2aec84b07cf56c74d90452388 WatchSource:0}: Error finding container 4a96f0146a60e825d8c8659118268e18b77004d2aec84b07cf56c74d90452388: Status 404 returned error can't find the container with id 4a96f0146a60e825d8c8659118268e18b77004d2aec84b07cf56c74d90452388 Jan 05 22:13:27 crc kubenswrapper[4910]: I0105 22:13:27.312915 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"de8aafdf-9b35-4c41-8726-6c7e86edee5f","Type":"ContainerStarted","Data":"4a96f0146a60e825d8c8659118268e18b77004d2aec84b07cf56c74d90452388"} Jan 05 22:13:27 crc kubenswrapper[4910]: I0105 22:13:27.405536 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 05 22:13:28 crc kubenswrapper[4910]: I0105 22:13:28.324251 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"de8aafdf-9b35-4c41-8726-6c7e86edee5f","Type":"ContainerStarted","Data":"38699171184dfa46b8af02c0e7a8bf314316f1f3e8f7f4d2c59c764a37fae22a"} Jan 05 22:13:28 crc kubenswrapper[4910]: I0105 22:13:28.324804 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 05 22:13:28 crc kubenswrapper[4910]: I0105 22:13:28.347908 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.347880793 podStartE2EDuration="2.347880793s" podCreationTimestamp="2026-01-05 22:13:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:13:28.343044232 +0000 UTC m=+1339.920541932" watchObservedRunningTime="2026-01-05 22:13:28.347880793 +0000 UTC m=+1339.925378463" Jan 05 22:13:30 crc kubenswrapper[4910]: I0105 22:13:30.750216 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 22:13:30 crc kubenswrapper[4910]: I0105 22:13:30.751054 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="db98b242-8ce3-4bc6-b04a-e22403612899" containerName="kube-state-metrics" containerID="cri-o://34e3a94f11898c05872cccd8fe29a521516732d502bea692be1e04ddae5e4717" gracePeriod=30 Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.198505 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.350570 4910 generic.go:334] "Generic (PLEG): container finished" podID="db98b242-8ce3-4bc6-b04a-e22403612899" containerID="34e3a94f11898c05872cccd8fe29a521516732d502bea692be1e04ddae5e4717" exitCode=2 Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.350641 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.350664 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"db98b242-8ce3-4bc6-b04a-e22403612899","Type":"ContainerDied","Data":"34e3a94f11898c05872cccd8fe29a521516732d502bea692be1e04ddae5e4717"} Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.351000 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"db98b242-8ce3-4bc6-b04a-e22403612899","Type":"ContainerDied","Data":"0cc5a53ae4132d67a7189face75f6bbefc33b4db2b7160287e2bf5d55437732e"} Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.351019 4910 scope.go:117] "RemoveContainer" containerID="34e3a94f11898c05872cccd8fe29a521516732d502bea692be1e04ddae5e4717" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.363342 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4ppb\" (UniqueName: \"kubernetes.io/projected/db98b242-8ce3-4bc6-b04a-e22403612899-kube-api-access-v4ppb\") pod \"db98b242-8ce3-4bc6-b04a-e22403612899\" (UID: \"db98b242-8ce3-4bc6-b04a-e22403612899\") " Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.369674 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db98b242-8ce3-4bc6-b04a-e22403612899-kube-api-access-v4ppb" (OuterVolumeSpecName: "kube-api-access-v4ppb") pod "db98b242-8ce3-4bc6-b04a-e22403612899" (UID: "db98b242-8ce3-4bc6-b04a-e22403612899"). InnerVolumeSpecName "kube-api-access-v4ppb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.374041 4910 scope.go:117] "RemoveContainer" containerID="34e3a94f11898c05872cccd8fe29a521516732d502bea692be1e04ddae5e4717" Jan 05 22:13:31 crc kubenswrapper[4910]: E0105 22:13:31.374693 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34e3a94f11898c05872cccd8fe29a521516732d502bea692be1e04ddae5e4717\": container with ID starting with 34e3a94f11898c05872cccd8fe29a521516732d502bea692be1e04ddae5e4717 not found: ID does not exist" containerID="34e3a94f11898c05872cccd8fe29a521516732d502bea692be1e04ddae5e4717" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.374739 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34e3a94f11898c05872cccd8fe29a521516732d502bea692be1e04ddae5e4717"} err="failed to get container status \"34e3a94f11898c05872cccd8fe29a521516732d502bea692be1e04ddae5e4717\": rpc error: code = NotFound desc = could not find container \"34e3a94f11898c05872cccd8fe29a521516732d502bea692be1e04ddae5e4717\": container with ID starting with 34e3a94f11898c05872cccd8fe29a521516732d502bea692be1e04ddae5e4717 not found: ID does not exist" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.466151 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4ppb\" (UniqueName: \"kubernetes.io/projected/db98b242-8ce3-4bc6-b04a-e22403612899-kube-api-access-v4ppb\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.680722 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.691104 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.733920 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 22:13:31 crc kubenswrapper[4910]: E0105 22:13:31.734789 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db98b242-8ce3-4bc6-b04a-e22403612899" containerName="kube-state-metrics" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.734816 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="db98b242-8ce3-4bc6-b04a-e22403612899" containerName="kube-state-metrics" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.735269 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="db98b242-8ce3-4bc6-b04a-e22403612899" containerName="kube-state-metrics" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.737888 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.740081 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.740697 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.746638 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.873103 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.873164 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.873208 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gn8t9\" (UniqueName: \"kubernetes.io/projected/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-api-access-gn8t9\") pod \"kube-state-metrics-0\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.873295 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.975417 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.975490 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.975539 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gn8t9\" (UniqueName: \"kubernetes.io/projected/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-api-access-gn8t9\") pod \"kube-state-metrics-0\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.975670 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.980316 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.980540 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.980578 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " pod="openstack/kube-state-metrics-0" Jan 05 22:13:31 crc kubenswrapper[4910]: I0105 22:13:31.993187 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gn8t9\" (UniqueName: \"kubernetes.io/projected/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-api-access-gn8t9\") pod \"kube-state-metrics-0\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " pod="openstack/kube-state-metrics-0" Jan 05 22:13:32 crc kubenswrapper[4910]: I0105 22:13:32.060904 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 05 22:13:32 crc kubenswrapper[4910]: I0105 22:13:32.469971 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:13:32 crc kubenswrapper[4910]: I0105 22:13:32.470766 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="ceilometer-central-agent" containerID="cri-o://8b2aaeeda5db7456c91ccc8d6a469af50cff132146c8f0688cf6672f7d4e7a1d" gracePeriod=30 Jan 05 22:13:32 crc kubenswrapper[4910]: I0105 22:13:32.470824 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="proxy-httpd" containerID="cri-o://0b75308431292941e9df942bb1fb78860ad7ff465742aaf9bb4f6edec7b26925" gracePeriod=30 Jan 05 22:13:32 crc kubenswrapper[4910]: I0105 22:13:32.470934 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="sg-core" containerID="cri-o://37e564ca57594d4deb4b8bf6773d294d5ba687af7ebd0d6145e576355e4b91f6" gracePeriod=30 Jan 05 22:13:32 crc kubenswrapper[4910]: I0105 22:13:32.471009 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="ceilometer-notification-agent" containerID="cri-o://5cd62cd7ae636deadd27d3542cc98f43a8068e6a8412baf73498319181725686" gracePeriod=30 Jan 05 22:13:32 crc kubenswrapper[4910]: I0105 22:13:32.511347 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 22:13:32 crc kubenswrapper[4910]: I0105 22:13:32.735175 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db98b242-8ce3-4bc6-b04a-e22403612899" path="/var/lib/kubelet/pods/db98b242-8ce3-4bc6-b04a-e22403612899/volumes" Jan 05 22:13:33 crc kubenswrapper[4910]: I0105 22:13:33.375274 4910 generic.go:334] "Generic (PLEG): container finished" podID="8797e759-783d-406e-b7e4-2f184019d3a4" containerID="0b75308431292941e9df942bb1fb78860ad7ff465742aaf9bb4f6edec7b26925" exitCode=0 Jan 05 22:13:33 crc kubenswrapper[4910]: I0105 22:13:33.375826 4910 generic.go:334] "Generic (PLEG): container finished" podID="8797e759-783d-406e-b7e4-2f184019d3a4" containerID="37e564ca57594d4deb4b8bf6773d294d5ba687af7ebd0d6145e576355e4b91f6" exitCode=2 Jan 05 22:13:33 crc kubenswrapper[4910]: I0105 22:13:33.375835 4910 generic.go:334] "Generic (PLEG): container finished" podID="8797e759-783d-406e-b7e4-2f184019d3a4" containerID="8b2aaeeda5db7456c91ccc8d6a469af50cff132146c8f0688cf6672f7d4e7a1d" exitCode=0 Jan 05 22:13:33 crc kubenswrapper[4910]: I0105 22:13:33.375362 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8797e759-783d-406e-b7e4-2f184019d3a4","Type":"ContainerDied","Data":"0b75308431292941e9df942bb1fb78860ad7ff465742aaf9bb4f6edec7b26925"} Jan 05 22:13:33 crc kubenswrapper[4910]: I0105 22:13:33.375916 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8797e759-783d-406e-b7e4-2f184019d3a4","Type":"ContainerDied","Data":"37e564ca57594d4deb4b8bf6773d294d5ba687af7ebd0d6145e576355e4b91f6"} Jan 05 22:13:33 crc kubenswrapper[4910]: I0105 22:13:33.375935 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"8797e759-783d-406e-b7e4-2f184019d3a4","Type":"ContainerDied","Data":"8b2aaeeda5db7456c91ccc8d6a469af50cff132146c8f0688cf6672f7d4e7a1d"} Jan 05 22:13:33 crc kubenswrapper[4910]: I0105 22:13:33.377652 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"b651f520-1463-434f-b16f-edd2b1b8f8d9","Type":"ContainerStarted","Data":"7eb793854dd2ca885d20aea4858a9baa5dd3b5bf64d04ad614881d8b63b82097"} Jan 05 22:13:33 crc kubenswrapper[4910]: I0105 22:13:33.377676 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"b651f520-1463-434f-b16f-edd2b1b8f8d9","Type":"ContainerStarted","Data":"95f2ff3b9c6c9c1bb55c592f771e9fecc4f1e0ca5bf74d5c87d32243569bc18f"} Jan 05 22:13:33 crc kubenswrapper[4910]: I0105 22:13:33.377983 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 05 22:13:33 crc kubenswrapper[4910]: I0105 22:13:33.397159 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.021215014 podStartE2EDuration="2.397093047s" podCreationTimestamp="2026-01-05 22:13:31 +0000 UTC" firstStartedPulling="2026-01-05 22:13:32.515303265 +0000 UTC m=+1344.092800935" lastFinishedPulling="2026-01-05 22:13:32.891181298 +0000 UTC m=+1344.468678968" observedRunningTime="2026-01-05 22:13:33.393783494 +0000 UTC m=+1344.971281184" watchObservedRunningTime="2026-01-05 22:13:33.397093047 +0000 UTC m=+1344.974590717" Jan 05 22:13:36 crc kubenswrapper[4910]: I0105 22:13:36.758016 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.296967 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-7srlg"] Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.298925 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.306397 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.311592 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.314094 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-7srlg"] Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.379624 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-7srlg\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.379700 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-config-data\") pod \"nova-cell0-cell-mapping-7srlg\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.380104 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjqhf\" (UniqueName: \"kubernetes.io/projected/44f006eb-f848-4351-914e-9a9e751194a3-kube-api-access-tjqhf\") pod \"nova-cell0-cell-mapping-7srlg\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.380248 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-scripts\") pod \"nova-cell0-cell-mapping-7srlg\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.418433 4910 generic.go:334] "Generic (PLEG): container finished" podID="8797e759-783d-406e-b7e4-2f184019d3a4" containerID="5cd62cd7ae636deadd27d3542cc98f43a8068e6a8412baf73498319181725686" exitCode=0 Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.418470 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8797e759-783d-406e-b7e4-2f184019d3a4","Type":"ContainerDied","Data":"5cd62cd7ae636deadd27d3542cc98f43a8068e6a8412baf73498319181725686"} Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.482215 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-7srlg\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.482304 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-config-data\") pod \"nova-cell0-cell-mapping-7srlg\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 
22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.482384 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjqhf\" (UniqueName: \"kubernetes.io/projected/44f006eb-f848-4351-914e-9a9e751194a3-kube-api-access-tjqhf\") pod \"nova-cell0-cell-mapping-7srlg\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.482411 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-scripts\") pod \"nova-cell0-cell-mapping-7srlg\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.492463 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-config-data\") pod \"nova-cell0-cell-mapping-7srlg\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.498331 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-7srlg\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.498755 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-scripts\") pod \"nova-cell0-cell-mapping-7srlg\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.513036 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjqhf\" (UniqueName: \"kubernetes.io/projected/44f006eb-f848-4351-914e-9a9e751194a3-kube-api-access-tjqhf\") pod \"nova-cell0-cell-mapping-7srlg\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.556442 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.557993 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.563383 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.584877 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.636490 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.686777 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55390b37-0abf-4520-ae3e-6f361a2b3f17-config-data\") pod \"nova-api-0\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") " pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.686856 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55390b37-0abf-4520-ae3e-6f361a2b3f17-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") " pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.686916 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55390b37-0abf-4520-ae3e-6f361a2b3f17-logs\") pod \"nova-api-0\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") " pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.687010 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zh5gz\" (UniqueName: \"kubernetes.io/projected/55390b37-0abf-4520-ae3e-6f361a2b3f17-kube-api-access-zh5gz\") pod \"nova-api-0\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") " pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.791975 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zh5gz\" (UniqueName: \"kubernetes.io/projected/55390b37-0abf-4520-ae3e-6f361a2b3f17-kube-api-access-zh5gz\") pod \"nova-api-0\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") " pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.792602 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55390b37-0abf-4520-ae3e-6f361a2b3f17-config-data\") pod \"nova-api-0\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") " pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.792669 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55390b37-0abf-4520-ae3e-6f361a2b3f17-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") " pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.792765 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55390b37-0abf-4520-ae3e-6f361a2b3f17-logs\") pod \"nova-api-0\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") " pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.793854 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55390b37-0abf-4520-ae3e-6f361a2b3f17-logs\") pod \"nova-api-0\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") " pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.801080 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55390b37-0abf-4520-ae3e-6f361a2b3f17-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"55390b37-0abf-4520-ae3e-6f361a2b3f17\") " pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.802964 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55390b37-0abf-4520-ae3e-6f361a2b3f17-config-data\") pod \"nova-api-0\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") " pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.819876 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zh5gz\" (UniqueName: \"kubernetes.io/projected/55390b37-0abf-4520-ae3e-6f361a2b3f17-kube-api-access-zh5gz\") pod \"nova-api-0\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") " pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.899695 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.918209 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.919696 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.929581 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.931454 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.935214 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.940760 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 05 22:13:37 crc kubenswrapper[4910]: I0105 22:13:37.978906 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.011888 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.014276 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.027484 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.111185 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.115169 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\") " pod="openstack/nova-scheduler-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.115232 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-logs\") pod \"nova-metadata-0\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.115269 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-config-data\") pod \"nova-scheduler-0\" (UID: \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\") " pod="openstack/nova-scheduler-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.115327 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-config-data\") pod \"nova-metadata-0\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.115355 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.115373 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r274j\" (UniqueName: \"kubernetes.io/projected/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-kube-api-access-r274j\") pod \"nova-scheduler-0\" (UID: \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\") " pod="openstack/nova-scheduler-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.115394 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqwz2\" (UniqueName: \"kubernetes.io/projected/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-kube-api-access-sqwz2\") pod \"nova-cell1-novncproxy-0\" (UID: \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.115416 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.115454 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8d9r\" (UniqueName: \"kubernetes.io/projected/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-kube-api-access-t8d9r\") pod \"nova-metadata-0\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.115480 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.124518 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.172039 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-zbvpw"] Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.173994 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.196925 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-zbvpw"] Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.206525 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-7srlg"] Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.217476 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-config-data\") pod \"nova-metadata-0\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.217744 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.217876 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r274j\" (UniqueName: \"kubernetes.io/projected/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-kube-api-access-r274j\") pod \"nova-scheduler-0\" (UID: \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\") " pod="openstack/nova-scheduler-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.217977 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqwz2\" (UniqueName: \"kubernetes.io/projected/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-kube-api-access-sqwz2\") pod \"nova-cell1-novncproxy-0\" (UID: \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.218087 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.218263 4910 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-t8d9r\" (UniqueName: \"kubernetes.io/projected/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-kube-api-access-t8d9r\") pod \"nova-metadata-0\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.218391 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.219320 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\") " pod="openstack/nova-scheduler-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.219652 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-logs\") pod \"nova-metadata-0\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.219831 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-config-data\") pod \"nova-scheduler-0\" (UID: \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\") " pod="openstack/nova-scheduler-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.232017 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-logs\") pod \"nova-metadata-0\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.240801 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-config-data\") pod \"nova-metadata-0\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.240980 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.246522 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqwz2\" (UniqueName: \"kubernetes.io/projected/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-kube-api-access-sqwz2\") pod \"nova-cell1-novncproxy-0\" (UID: \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.247844 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\") " pod="openstack/nova-scheduler-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.250564 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r274j\" (UniqueName: \"kubernetes.io/projected/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-kube-api-access-r274j\") pod \"nova-scheduler-0\" (UID: \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\") " pod="openstack/nova-scheduler-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.253964 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-config-data\") pod \"nova-scheduler-0\" (UID: \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\") " pod="openstack/nova-scheduler-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.269968 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.269998 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.270443 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.270542 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8d9r\" (UniqueName: \"kubernetes.io/projected/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-kube-api-access-t8d9r\") pod \"nova-metadata-0\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.293542 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.333901 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-ovsdbserver-sb\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.334273 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-dns-swift-storage-0\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.334438 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-config\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.334802 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgsxd\" (UniqueName: \"kubernetes.io/projected/0a447ec9-7c46-472b-af0a-1c0633e4abf2-kube-api-access-cgsxd\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.335030 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-dns-svc\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.335336 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-ovsdbserver-nb\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.370031 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.437199 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgsxd\" (UniqueName: \"kubernetes.io/projected/0a447ec9-7c46-472b-af0a-1c0633e4abf2-kube-api-access-cgsxd\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.437278 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-dns-svc\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.437353 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-ovsdbserver-nb\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.437401 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-ovsdbserver-sb\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.437426 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-dns-swift-storage-0\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.437457 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-config\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.438866 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-config\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.439345 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-ovsdbserver-nb\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.439632 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-ovsdbserver-sb\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: 
I0105 22:13:38.440544 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-dns-svc\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.442583 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-dns-swift-storage-0\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.455251 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-7srlg" event={"ID":"44f006eb-f848-4351-914e-9a9e751194a3","Type":"ContainerStarted","Data":"ad9d152d89bf0aae50366306be76e1e575d79035970e611b02a0492239883eba"} Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.469408 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgsxd\" (UniqueName: \"kubernetes.io/projected/0a447ec9-7c46-472b-af0a-1c0633e4abf2-kube-api-access-cgsxd\") pod \"dnsmasq-dns-5bfb54f9b5-zbvpw\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.537902 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-gqjqz"] Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.540410 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-gqjqz"] Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.540494 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.546019 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.553467 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.619059 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.642504 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-gqjqz\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.642565 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtldk\" (UniqueName: \"kubernetes.io/projected/e520b140-ba86-4e17-82d2-4e8c4dc15474-kube-api-access-wtldk\") pod \"nova-cell1-conductor-db-sync-gqjqz\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.642597 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-config-data\") pod \"nova-cell1-conductor-db-sync-gqjqz\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.642679 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-scripts\") pod \"nova-cell1-conductor-db-sync-gqjqz\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.662859 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.755237 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-gqjqz\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.755681 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtldk\" (UniqueName: \"kubernetes.io/projected/e520b140-ba86-4e17-82d2-4e8c4dc15474-kube-api-access-wtldk\") pod \"nova-cell1-conductor-db-sync-gqjqz\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.755715 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-config-data\") pod \"nova-cell1-conductor-db-sync-gqjqz\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.755829 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-scripts\") pod \"nova-cell1-conductor-db-sync-gqjqz\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.764666 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-scripts\") pod \"nova-cell1-conductor-db-sync-gqjqz\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.774402 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-gqjqz\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.781838 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-config-data\") pod \"nova-cell1-conductor-db-sync-gqjqz\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.797723 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtldk\" (UniqueName: \"kubernetes.io/projected/e520b140-ba86-4e17-82d2-4e8c4dc15474-kube-api-access-wtldk\") pod \"nova-cell1-conductor-db-sync-gqjqz\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:38 crc kubenswrapper[4910]: I0105 22:13:38.882846 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.271717 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.284719 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.302059 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:39 crc kubenswrapper[4910]: W0105 22:13:39.318503 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ce21ad1_6aaf_494d_b4ef_7e99f4b31ff3.slice/crio-6f0e5856611e59e3566f5a3fda02c37e2a094979061f82b9b910493d7a638dc5 WatchSource:0}: Error finding container 6f0e5856611e59e3566f5a3fda02c37e2a094979061f82b9b910493d7a638dc5: Status 404 returned error can't find the container with id 6f0e5856611e59e3566f5a3fda02c37e2a094979061f82b9b910493d7a638dc5 Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.391414 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.435650 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-zbvpw"] Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.483444 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f3a1ce19-89a2-4aba-a60f-5338c61c2e87","Type":"ContainerStarted","Data":"d01c0563d58f6c5ab93d4aafe9fadec72e116d81a33646b7721fe34fafdcda1a"} Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.491617 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" event={"ID":"0a447ec9-7c46-472b-af0a-1c0633e4abf2","Type":"ContainerStarted","Data":"d6ebc9335df6a6464c6929dc8748349684b354d02157ee6e777576095af38727"} Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.494640 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3","Type":"ContainerStarted","Data":"6f0e5856611e59e3566f5a3fda02c37e2a094979061f82b9b910493d7a638dc5"} Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.500668 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8797e759-783d-406e-b7e4-2f184019d3a4","Type":"ContainerDied","Data":"423b250cb3286215590a96890151b078d2d38fd6808f33367c464804e9fa9733"} Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.500737 4910 scope.go:117] "RemoveContainer" containerID="0b75308431292941e9df942bb1fb78860ad7ff465742aaf9bb4f6edec7b26925" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.500797 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.502841 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"55390b37-0abf-4520-ae3e-6f361a2b3f17","Type":"ContainerStarted","Data":"b29a3e898bf668bba013aa653d49a865c0feec0c78c0a7944b22b86a2667c459"} Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.506175 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a57cd3fa-1d29-4a8c-a85f-2735b92640a7","Type":"ContainerStarted","Data":"b01e904f20b5a81df6b1d9a649f82674dbcc9788619f29a7d344dc5f7844d717"} Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.509703 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-gqjqz"] Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.518785 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-7srlg" event={"ID":"44f006eb-f848-4351-914e-9a9e751194a3","Type":"ContainerStarted","Data":"7d5136f4f6ce7105da26c27dfc52656f8ab2a759bcfaccf2d8f969116e9751d5"} Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.548570 4910 scope.go:117] "RemoveContainer" containerID="37e564ca57594d4deb4b8bf6773d294d5ba687af7ebd0d6145e576355e4b91f6" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.549764 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-7srlg" podStartSLOduration=2.549719253 podStartE2EDuration="2.549719253s" podCreationTimestamp="2026-01-05 22:13:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:13:39.53963332 +0000 UTC m=+1351.117130990" watchObservedRunningTime="2026-01-05 22:13:39.549719253 +0000 UTC m=+1351.127216923" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.574128 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5fdj\" (UniqueName: \"kubernetes.io/projected/8797e759-783d-406e-b7e4-2f184019d3a4-kube-api-access-w5fdj\") pod \"8797e759-783d-406e-b7e4-2f184019d3a4\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.574410 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8797e759-783d-406e-b7e4-2f184019d3a4-log-httpd\") pod \"8797e759-783d-406e-b7e4-2f184019d3a4\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.574550 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-combined-ca-bundle\") pod \"8797e759-783d-406e-b7e4-2f184019d3a4\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.574712 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-config-data\") pod \"8797e759-783d-406e-b7e4-2f184019d3a4\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.574784 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-scripts\") 
pod \"8797e759-783d-406e-b7e4-2f184019d3a4\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.574928 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-sg-core-conf-yaml\") pod \"8797e759-783d-406e-b7e4-2f184019d3a4\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.575017 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8797e759-783d-406e-b7e4-2f184019d3a4-run-httpd\") pod \"8797e759-783d-406e-b7e4-2f184019d3a4\" (UID: \"8797e759-783d-406e-b7e4-2f184019d3a4\") " Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.576239 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8797e759-783d-406e-b7e4-2f184019d3a4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8797e759-783d-406e-b7e4-2f184019d3a4" (UID: "8797e759-783d-406e-b7e4-2f184019d3a4"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.576740 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8797e759-783d-406e-b7e4-2f184019d3a4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8797e759-783d-406e-b7e4-2f184019d3a4" (UID: "8797e759-783d-406e-b7e4-2f184019d3a4"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.589354 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8797e759-783d-406e-b7e4-2f184019d3a4-kube-api-access-w5fdj" (OuterVolumeSpecName: "kube-api-access-w5fdj") pod "8797e759-783d-406e-b7e4-2f184019d3a4" (UID: "8797e759-783d-406e-b7e4-2f184019d3a4"). InnerVolumeSpecName "kube-api-access-w5fdj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.592789 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-scripts" (OuterVolumeSpecName: "scripts") pod "8797e759-783d-406e-b7e4-2f184019d3a4" (UID: "8797e759-783d-406e-b7e4-2f184019d3a4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.596074 4910 scope.go:117] "RemoveContainer" containerID="5cd62cd7ae636deadd27d3542cc98f43a8068e6a8412baf73498319181725686" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.625932 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8797e759-783d-406e-b7e4-2f184019d3a4" (UID: "8797e759-783d-406e-b7e4-2f184019d3a4"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.634820 4910 scope.go:117] "RemoveContainer" containerID="8b2aaeeda5db7456c91ccc8d6a469af50cff132146c8f0688cf6672f7d4e7a1d" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.677344 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.679339 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.679420 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8797e759-783d-406e-b7e4-2f184019d3a4-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.679478 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5fdj\" (UniqueName: \"kubernetes.io/projected/8797e759-783d-406e-b7e4-2f184019d3a4-kube-api-access-w5fdj\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.679533 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8797e759-783d-406e-b7e4-2f184019d3a4-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.748105 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-config-data" (OuterVolumeSpecName: "config-data") pod "8797e759-783d-406e-b7e4-2f184019d3a4" (UID: "8797e759-783d-406e-b7e4-2f184019d3a4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.765932 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8797e759-783d-406e-b7e4-2f184019d3a4" (UID: "8797e759-783d-406e-b7e4-2f184019d3a4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.782631 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.782673 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8797e759-783d-406e-b7e4-2f184019d3a4-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.916962 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.948583 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.965549 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:13:39 crc kubenswrapper[4910]: E0105 22:13:39.966645 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="proxy-httpd" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.966667 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="proxy-httpd" Jan 05 22:13:39 crc kubenswrapper[4910]: E0105 22:13:39.966691 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="sg-core" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.966697 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="sg-core" Jan 05 22:13:39 crc kubenswrapper[4910]: E0105 22:13:39.966728 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="ceilometer-notification-agent" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.966734 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="ceilometer-notification-agent" Jan 05 22:13:39 crc kubenswrapper[4910]: E0105 22:13:39.966747 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="ceilometer-central-agent" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.966753 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="ceilometer-central-agent" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.967192 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="proxy-httpd" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.967213 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="ceilometer-notification-agent" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.967232 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="sg-core" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.967248 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" containerName="ceilometer-central-agent" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.971647 4910 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.974846 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.975189 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.975369 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 05 22:13:39 crc kubenswrapper[4910]: I0105 22:13:39.982444 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:13:40 crc kubenswrapper[4910]: E0105 22:13:40.047180 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8797e759_783d_406e_b7e4_2f184019d3a4.slice/crio-423b250cb3286215590a96890151b078d2d38fd6808f33367c464804e9fa9733\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8797e759_783d_406e_b7e4_2f184019d3a4.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a447ec9_7c46_472b_af0a_1c0633e4abf2.slice/crio-ecf9d4442ec5888639a8263fc99dcb78cd90e05e8c3b8c08265be6959307e61d.scope\": RecentStats: unable to find data in memory cache]" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.098263 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-config-data\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.098605 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f2250f2-3745-4cfd-8431-9d653c587b63-run-httpd\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.098700 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f2250f2-3745-4cfd-8431-9d653c587b63-log-httpd\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.098800 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9m96\" (UniqueName: \"kubernetes.io/projected/3f2250f2-3745-4cfd-8431-9d653c587b63-kube-api-access-r9m96\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.098871 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.098985 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.099057 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.099555 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-scripts\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.201433 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-scripts\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.201549 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-config-data\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.201617 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f2250f2-3745-4cfd-8431-9d653c587b63-run-httpd\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.201640 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f2250f2-3745-4cfd-8431-9d653c587b63-log-httpd\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.201706 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9m96\" (UniqueName: \"kubernetes.io/projected/3f2250f2-3745-4cfd-8431-9d653c587b63-kube-api-access-r9m96\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.201755 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.201833 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 
22:13:40.201856 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.202547 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f2250f2-3745-4cfd-8431-9d653c587b63-run-httpd\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.206018 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f2250f2-3745-4cfd-8431-9d653c587b63-log-httpd\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.209543 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.212038 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.212628 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.216560 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-scripts\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.220383 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9m96\" (UniqueName: \"kubernetes.io/projected/3f2250f2-3745-4cfd-8431-9d653c587b63-kube-api-access-r9m96\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.235037 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-config-data\") pod \"ceilometer-0\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.310651 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.543106 4910 generic.go:334] "Generic (PLEG): container finished" podID="0a447ec9-7c46-472b-af0a-1c0633e4abf2" containerID="ecf9d4442ec5888639a8263fc99dcb78cd90e05e8c3b8c08265be6959307e61d" exitCode=0 Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.543182 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" event={"ID":"0a447ec9-7c46-472b-af0a-1c0633e4abf2","Type":"ContainerDied","Data":"ecf9d4442ec5888639a8263fc99dcb78cd90e05e8c3b8c08265be6959307e61d"} Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.552762 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-gqjqz" event={"ID":"e520b140-ba86-4e17-82d2-4e8c4dc15474","Type":"ContainerStarted","Data":"5e825dd38b907536857a97445f1e68ba17937cbaba208d22383b08c67caa5ac5"} Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.552802 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-gqjqz" event={"ID":"e520b140-ba86-4e17-82d2-4e8c4dc15474","Type":"ContainerStarted","Data":"ba23b6f09b27fd72fcb26a9a07fd7faa5fd559ec7a4373b9676bb3b4cc71928c"} Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.590816 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-gqjqz" podStartSLOduration=2.590796624 podStartE2EDuration="2.590796624s" podCreationTimestamp="2026-01-05 22:13:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:13:40.585080381 +0000 UTC m=+1352.162578061" watchObservedRunningTime="2026-01-05 22:13:40.590796624 +0000 UTC m=+1352.168294284" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.735524 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8797e759-783d-406e-b7e4-2f184019d3a4" path="/var/lib/kubelet/pods/8797e759-783d-406e-b7e4-2f184019d3a4/volumes" Jan 05 22:13:40 crc kubenswrapper[4910]: I0105 22:13:40.839560 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:13:41 crc kubenswrapper[4910]: I0105 22:13:41.243831 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 22:13:41 crc kubenswrapper[4910]: I0105 22:13:41.255922 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:41 crc kubenswrapper[4910]: I0105 22:13:41.565689 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" event={"ID":"0a447ec9-7c46-472b-af0a-1c0633e4abf2","Type":"ContainerStarted","Data":"b179dd5d1002d586fe0572c6bcbeb49af72f867d6a5101a1fc80bd913f604cff"} Jan 05 22:13:41 crc kubenswrapper[4910]: I0105 22:13:41.566050 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:41 crc kubenswrapper[4910]: I0105 22:13:41.587642 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" podStartSLOduration=3.587615057 podStartE2EDuration="3.587615057s" podCreationTimestamp="2026-01-05 22:13:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:13:41.585192877 +0000 UTC m=+1353.162690567" 
watchObservedRunningTime="2026-01-05 22:13:41.587615057 +0000 UTC m=+1353.165112727" Jan 05 22:13:41 crc kubenswrapper[4910]: W0105 22:13:41.623839 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f2250f2_3745_4cfd_8431_9d653c587b63.slice/crio-891455f579bc051763e37c94c472409763e8a997eed82f29ebc22f2aa0974e36 WatchSource:0}: Error finding container 891455f579bc051763e37c94c472409763e8a997eed82f29ebc22f2aa0974e36: Status 404 returned error can't find the container with id 891455f579bc051763e37c94c472409763e8a997eed82f29ebc22f2aa0974e36 Jan 05 22:13:42 crc kubenswrapper[4910]: I0105 22:13:42.073566 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 05 22:13:42 crc kubenswrapper[4910]: I0105 22:13:42.584608 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f2250f2-3745-4cfd-8431-9d653c587b63","Type":"ContainerStarted","Data":"891455f579bc051763e37c94c472409763e8a997eed82f29ebc22f2aa0974e36"} Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.597559 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3","Type":"ContainerStarted","Data":"97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d"} Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.599955 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f2250f2-3745-4cfd-8431-9d653c587b63","Type":"ContainerStarted","Data":"d093b482c421adc7d82da4245d88adc7b270620fd4852fc0d78a8373c23ea00e"} Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.602805 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"55390b37-0abf-4520-ae3e-6f361a2b3f17","Type":"ContainerStarted","Data":"80e6da2bb90d9c3d1a96d9dce6a11be58fb7c39b2ab143358753f2b371ac141e"} Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.602892 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"55390b37-0abf-4520-ae3e-6f361a2b3f17","Type":"ContainerStarted","Data":"cec591e146b576f40e19764d655ead555ad982542f05b667990292f76be5bae5"} Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.604726 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a57cd3fa-1d29-4a8c-a85f-2735b92640a7","Type":"ContainerStarted","Data":"7851b7975bed40518031113c597026bfbf2f70db38691075fd3f8e3c99a934cf"} Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.605082 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="a57cd3fa-1d29-4a8c-a85f-2735b92640a7" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://7851b7975bed40518031113c597026bfbf2f70db38691075fd3f8e3c99a934cf" gracePeriod=30 Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.606882 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f3a1ce19-89a2-4aba-a60f-5338c61c2e87","Type":"ContainerStarted","Data":"f15e447481e048a4b60bda455eea09289b0c3558272334429119512425e4d932"} Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.606916 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"f3a1ce19-89a2-4aba-a60f-5338c61c2e87","Type":"ContainerStarted","Data":"654f50da996359a3ef1b482aae55c95262b4394474fc45bbdb27cce75fcd3e71"} Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.607018 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="f3a1ce19-89a2-4aba-a60f-5338c61c2e87" containerName="nova-metadata-log" containerID="cri-o://654f50da996359a3ef1b482aae55c95262b4394474fc45bbdb27cce75fcd3e71" gracePeriod=30 Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.607070 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="f3a1ce19-89a2-4aba-a60f-5338c61c2e87" containerName="nova-metadata-metadata" containerID="cri-o://f15e447481e048a4b60bda455eea09289b0c3558272334429119512425e4d932" gracePeriod=30 Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.642182 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.473688371 podStartE2EDuration="6.642150687s" podCreationTimestamp="2026-01-05 22:13:37 +0000 UTC" firstStartedPulling="2026-01-05 22:13:39.3307857 +0000 UTC m=+1350.908283370" lastFinishedPulling="2026-01-05 22:13:42.499248016 +0000 UTC m=+1354.076745686" observedRunningTime="2026-01-05 22:13:43.625436299 +0000 UTC m=+1355.202933979" watchObservedRunningTime="2026-01-05 22:13:43.642150687 +0000 UTC m=+1355.219648357" Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.686786 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.495779535 podStartE2EDuration="6.686767215s" podCreationTimestamp="2026-01-05 22:13:37 +0000 UTC" firstStartedPulling="2026-01-05 22:13:39.30841317 +0000 UTC m=+1350.885910840" lastFinishedPulling="2026-01-05 22:13:42.49940085 +0000 UTC m=+1354.076898520" observedRunningTime="2026-01-05 22:13:43.684889717 +0000 UTC m=+1355.262387387" watchObservedRunningTime="2026-01-05 22:13:43.686767215 +0000 UTC m=+1355.264264885" Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.691533 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.815326535 podStartE2EDuration="6.691516053s" podCreationTimestamp="2026-01-05 22:13:37 +0000 UTC" firstStartedPulling="2026-01-05 22:13:38.633062999 +0000 UTC m=+1350.210560669" lastFinishedPulling="2026-01-05 22:13:42.509252517 +0000 UTC m=+1354.086750187" observedRunningTime="2026-01-05 22:13:43.665844741 +0000 UTC m=+1355.243342421" watchObservedRunningTime="2026-01-05 22:13:43.691516053 +0000 UTC m=+1355.269013723" Jan 05 22:13:43 crc kubenswrapper[4910]: I0105 22:13:43.719739 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.508829682 podStartE2EDuration="6.71971597s" podCreationTimestamp="2026-01-05 22:13:37 +0000 UTC" firstStartedPulling="2026-01-05 22:13:39.288496201 +0000 UTC m=+1350.865993871" lastFinishedPulling="2026-01-05 22:13:42.499382499 +0000 UTC m=+1354.076880159" observedRunningTime="2026-01-05 22:13:43.706767935 +0000 UTC m=+1355.284265615" watchObservedRunningTime="2026-01-05 22:13:43.71971597 +0000 UTC m=+1355.297213630" Jan 05 22:13:44 crc kubenswrapper[4910]: I0105 22:13:44.624597 4910 generic.go:334] "Generic (PLEG): container finished" podID="f3a1ce19-89a2-4aba-a60f-5338c61c2e87" 
containerID="f15e447481e048a4b60bda455eea09289b0c3558272334429119512425e4d932" exitCode=0 Jan 05 22:13:44 crc kubenswrapper[4910]: I0105 22:13:44.624932 4910 generic.go:334] "Generic (PLEG): container finished" podID="f3a1ce19-89a2-4aba-a60f-5338c61c2e87" containerID="654f50da996359a3ef1b482aae55c95262b4394474fc45bbdb27cce75fcd3e71" exitCode=143 Jan 05 22:13:44 crc kubenswrapper[4910]: I0105 22:13:44.624693 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f3a1ce19-89a2-4aba-a60f-5338c61c2e87","Type":"ContainerDied","Data":"f15e447481e048a4b60bda455eea09289b0c3558272334429119512425e4d932"} Jan 05 22:13:44 crc kubenswrapper[4910]: I0105 22:13:44.625041 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f3a1ce19-89a2-4aba-a60f-5338c61c2e87","Type":"ContainerDied","Data":"654f50da996359a3ef1b482aae55c95262b4394474fc45bbdb27cce75fcd3e71"} Jan 05 22:13:44 crc kubenswrapper[4910]: I0105 22:13:44.628445 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f2250f2-3745-4cfd-8431-9d653c587b63","Type":"ContainerStarted","Data":"999c2f7ca6118b09dbf9b8c9a92383cb7a09467572a0e86765bb0a727fb5a5d6"} Jan 05 22:13:44 crc kubenswrapper[4910]: I0105 22:13:44.950958 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.038816 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-combined-ca-bundle\") pod \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.038978 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8d9r\" (UniqueName: \"kubernetes.io/projected/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-kube-api-access-t8d9r\") pod \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.039105 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-logs\") pod \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.039281 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-config-data\") pod \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\" (UID: \"f3a1ce19-89a2-4aba-a60f-5338c61c2e87\") " Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.039523 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-logs" (OuterVolumeSpecName: "logs") pod "f3a1ce19-89a2-4aba-a60f-5338c61c2e87" (UID: "f3a1ce19-89a2-4aba-a60f-5338c61c2e87"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.039817 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.044930 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-kube-api-access-t8d9r" (OuterVolumeSpecName: "kube-api-access-t8d9r") pod "f3a1ce19-89a2-4aba-a60f-5338c61c2e87" (UID: "f3a1ce19-89a2-4aba-a60f-5338c61c2e87"). InnerVolumeSpecName "kube-api-access-t8d9r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.074958 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-config-data" (OuterVolumeSpecName: "config-data") pod "f3a1ce19-89a2-4aba-a60f-5338c61c2e87" (UID: "f3a1ce19-89a2-4aba-a60f-5338c61c2e87"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.077993 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f3a1ce19-89a2-4aba-a60f-5338c61c2e87" (UID: "f3a1ce19-89a2-4aba-a60f-5338c61c2e87"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.141821 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8d9r\" (UniqueName: \"kubernetes.io/projected/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-kube-api-access-t8d9r\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.141862 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.141875 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3a1ce19-89a2-4aba-a60f-5338c61c2e87-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.645170 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f3a1ce19-89a2-4aba-a60f-5338c61c2e87","Type":"ContainerDied","Data":"d01c0563d58f6c5ab93d4aafe9fadec72e116d81a33646b7721fe34fafdcda1a"} Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.645238 4910 scope.go:117] "RemoveContainer" containerID="f15e447481e048a4b60bda455eea09289b0c3558272334429119512425e4d932" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.645398 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.691806 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.698572 4910 scope.go:117] "RemoveContainer" containerID="654f50da996359a3ef1b482aae55c95262b4394474fc45bbdb27cce75fcd3e71" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.713373 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.723409 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:45 crc kubenswrapper[4910]: E0105 22:13:45.723873 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3a1ce19-89a2-4aba-a60f-5338c61c2e87" containerName="nova-metadata-metadata" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.723893 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3a1ce19-89a2-4aba-a60f-5338c61c2e87" containerName="nova-metadata-metadata" Jan 05 22:13:45 crc kubenswrapper[4910]: E0105 22:13:45.723938 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3a1ce19-89a2-4aba-a60f-5338c61c2e87" containerName="nova-metadata-log" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.723945 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3a1ce19-89a2-4aba-a60f-5338c61c2e87" containerName="nova-metadata-log" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.724112 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3a1ce19-89a2-4aba-a60f-5338c61c2e87" containerName="nova-metadata-metadata" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.724149 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3a1ce19-89a2-4aba-a60f-5338c61c2e87" containerName="nova-metadata-log" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.725182 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.731571 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.731586 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.732619 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.857928 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.858189 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-config-data\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.858231 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da681308-73a5-4c6c-a1db-c2bf0dde126c-logs\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.858251 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f247k\" (UniqueName: \"kubernetes.io/projected/da681308-73a5-4c6c-a1db-c2bf0dde126c-kube-api-access-f247k\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.858302 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.960866 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.960946 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.961197 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-config-data\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " 
pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.961269 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da681308-73a5-4c6c-a1db-c2bf0dde126c-logs\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.961304 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f247k\" (UniqueName: \"kubernetes.io/projected/da681308-73a5-4c6c-a1db-c2bf0dde126c-kube-api-access-f247k\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.961760 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da681308-73a5-4c6c-a1db-c2bf0dde126c-logs\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.969421 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.969920 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-config-data\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.970094 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:45 crc kubenswrapper[4910]: I0105 22:13:45.981345 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f247k\" (UniqueName: \"kubernetes.io/projected/da681308-73a5-4c6c-a1db-c2bf0dde126c-kube-api-access-f247k\") pod \"nova-metadata-0\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " pod="openstack/nova-metadata-0" Jan 05 22:13:46 crc kubenswrapper[4910]: I0105 22:13:46.053430 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:13:46 crc kubenswrapper[4910]: I0105 22:13:46.547809 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:46 crc kubenswrapper[4910]: W0105 22:13:46.552931 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda681308_73a5_4c6c_a1db_c2bf0dde126c.slice/crio-cbad3fa2b1dba88c1bc07e75fbbfda000be45793b586e386a7f002889e3171e0 WatchSource:0}: Error finding container cbad3fa2b1dba88c1bc07e75fbbfda000be45793b586e386a7f002889e3171e0: Status 404 returned error can't find the container with id cbad3fa2b1dba88c1bc07e75fbbfda000be45793b586e386a7f002889e3171e0 Jan 05 22:13:46 crc kubenswrapper[4910]: I0105 22:13:46.658039 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da681308-73a5-4c6c-a1db-c2bf0dde126c","Type":"ContainerStarted","Data":"cbad3fa2b1dba88c1bc07e75fbbfda000be45793b586e386a7f002889e3171e0"} Jan 05 22:13:46 crc kubenswrapper[4910]: I0105 22:13:46.668468 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f2250f2-3745-4cfd-8431-9d653c587b63","Type":"ContainerStarted","Data":"ef1d83a2d2057417c628c88f388bdfbf088c3937e36d5b1f7cbfa026594aa33d"} Jan 05 22:13:46 crc kubenswrapper[4910]: I0105 22:13:46.746932 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3a1ce19-89a2-4aba-a60f-5338c61c2e87" path="/var/lib/kubelet/pods/f3a1ce19-89a2-4aba-a60f-5338c61c2e87/volumes" Jan 05 22:13:47 crc kubenswrapper[4910]: I0105 22:13:47.680191 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da681308-73a5-4c6c-a1db-c2bf0dde126c","Type":"ContainerStarted","Data":"606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680"} Jan 05 22:13:47 crc kubenswrapper[4910]: I0105 22:13:47.680667 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da681308-73a5-4c6c-a1db-c2bf0dde126c","Type":"ContainerStarted","Data":"5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421"} Jan 05 22:13:47 crc kubenswrapper[4910]: I0105 22:13:47.683988 4910 generic.go:334] "Generic (PLEG): container finished" podID="44f006eb-f848-4351-914e-9a9e751194a3" containerID="7d5136f4f6ce7105da26c27dfc52656f8ab2a759bcfaccf2d8f969116e9751d5" exitCode=0 Jan 05 22:13:47 crc kubenswrapper[4910]: I0105 22:13:47.684044 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-7srlg" event={"ID":"44f006eb-f848-4351-914e-9a9e751194a3","Type":"ContainerDied","Data":"7d5136f4f6ce7105da26c27dfc52656f8ab2a759bcfaccf2d8f969116e9751d5"} Jan 05 22:13:47 crc kubenswrapper[4910]: I0105 22:13:47.705520 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.7054949329999998 podStartE2EDuration="2.705494933s" podCreationTimestamp="2026-01-05 22:13:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:13:47.70338066 +0000 UTC m=+1359.280878340" watchObservedRunningTime="2026-01-05 22:13:47.705494933 +0000 UTC m=+1359.282992593" Jan 05 22:13:47 crc kubenswrapper[4910]: I0105 22:13:47.900638 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 05 22:13:47 crc kubenswrapper[4910]: I0105 
22:13:47.900771 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 05 22:13:48 crc kubenswrapper[4910]: I0105 22:13:48.271756 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 05 22:13:48 crc kubenswrapper[4910]: I0105 22:13:48.272319 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 05 22:13:48 crc kubenswrapper[4910]: I0105 22:13:48.295269 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:13:48 crc kubenswrapper[4910]: I0105 22:13:48.302599 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 05 22:13:48 crc kubenswrapper[4910]: I0105 22:13:48.664329 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:13:48 crc kubenswrapper[4910]: I0105 22:13:48.698979 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f2250f2-3745-4cfd-8431-9d653c587b63","Type":"ContainerStarted","Data":"852ee4e47f911c7c2fcd4579b638cfe58eb083e3d449b414069d34e2e083e724"} Jan 05 22:13:48 crc kubenswrapper[4910]: I0105 22:13:48.699883 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 05 22:13:48 crc kubenswrapper[4910]: I0105 22:13:48.781739 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.1771347930000005 podStartE2EDuration="9.781715094s" podCreationTimestamp="2026-01-05 22:13:39 +0000 UTC" firstStartedPulling="2026-01-05 22:13:42.386242647 +0000 UTC m=+1353.963740317" lastFinishedPulling="2026-01-05 22:13:47.990822938 +0000 UTC m=+1359.568320618" observedRunningTime="2026-01-05 22:13:48.745895487 +0000 UTC m=+1360.323393167" watchObservedRunningTime="2026-01-05 22:13:48.781715094 +0000 UTC m=+1360.359212764" Jan 05 22:13:48 crc kubenswrapper[4910]: I0105 22:13:48.800506 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-tddh2"] Jan 05 22:13:48 crc kubenswrapper[4910]: I0105 22:13:48.800680 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 05 22:13:48 crc kubenswrapper[4910]: I0105 22:13:48.800800 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" podUID="649fef5f-2881-49d4-8bf4-1cf8e93a87b3" containerName="dnsmasq-dns" containerID="cri-o://35bc838138999a85ba33aa625ee381aff61ed72cb60e68c0f63973fd33ed2d44" gracePeriod=10 Jan 05 22:13:48 crc kubenswrapper[4910]: I0105 22:13:48.994292 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="55390b37-0abf-4520-ae3e-6f361a2b3f17" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.183:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 22:13:48 crc kubenswrapper[4910]: I0105 22:13:48.994455 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="55390b37-0abf-4520-ae3e-6f361a2b3f17" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.183:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.188037 4910 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.362423 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-combined-ca-bundle\") pod \"44f006eb-f848-4351-914e-9a9e751194a3\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.362519 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjqhf\" (UniqueName: \"kubernetes.io/projected/44f006eb-f848-4351-914e-9a9e751194a3-kube-api-access-tjqhf\") pod \"44f006eb-f848-4351-914e-9a9e751194a3\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.362545 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-scripts\") pod \"44f006eb-f848-4351-914e-9a9e751194a3\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.362597 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-config-data\") pod \"44f006eb-f848-4351-914e-9a9e751194a3\" (UID: \"44f006eb-f848-4351-914e-9a9e751194a3\") " Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.378504 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-scripts" (OuterVolumeSpecName: "scripts") pod "44f006eb-f848-4351-914e-9a9e751194a3" (UID: "44f006eb-f848-4351-914e-9a9e751194a3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.380605 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44f006eb-f848-4351-914e-9a9e751194a3-kube-api-access-tjqhf" (OuterVolumeSpecName: "kube-api-access-tjqhf") pod "44f006eb-f848-4351-914e-9a9e751194a3" (UID: "44f006eb-f848-4351-914e-9a9e751194a3"). InnerVolumeSpecName "kube-api-access-tjqhf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.417610 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "44f006eb-f848-4351-914e-9a9e751194a3" (UID: "44f006eb-f848-4351-914e-9a9e751194a3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.422293 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-config-data" (OuterVolumeSpecName: "config-data") pod "44f006eb-f848-4351-914e-9a9e751194a3" (UID: "44f006eb-f848-4351-914e-9a9e751194a3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.465133 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.465457 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjqhf\" (UniqueName: \"kubernetes.io/projected/44f006eb-f848-4351-914e-9a9e751194a3-kube-api-access-tjqhf\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.465554 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.465669 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44f006eb-f848-4351-914e-9a9e751194a3-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.710855 4910 generic.go:334] "Generic (PLEG): container finished" podID="649fef5f-2881-49d4-8bf4-1cf8e93a87b3" containerID="35bc838138999a85ba33aa625ee381aff61ed72cb60e68c0f63973fd33ed2d44" exitCode=0 Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.710920 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" event={"ID":"649fef5f-2881-49d4-8bf4-1cf8e93a87b3","Type":"ContainerDied","Data":"35bc838138999a85ba33aa625ee381aff61ed72cb60e68c0f63973fd33ed2d44"} Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.713275 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-7srlg" event={"ID":"44f006eb-f848-4351-914e-9a9e751194a3","Type":"ContainerDied","Data":"ad9d152d89bf0aae50366306be76e1e575d79035970e611b02a0492239883eba"} Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.713341 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad9d152d89bf0aae50366306be76e1e575d79035970e611b02a0492239883eba" Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.713502 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-7srlg" Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.802820 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.892466 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.892820 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="55390b37-0abf-4520-ae3e-6f361a2b3f17" containerName="nova-api-api" containerID="cri-o://80e6da2bb90d9c3d1a96d9dce6a11be58fb7c39b2ab143358753f2b371ac141e" gracePeriod=30 Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.892820 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="55390b37-0abf-4520-ae3e-6f361a2b3f17" containerName="nova-api-log" containerID="cri-o://cec591e146b576f40e19764d655ead555ad982542f05b667990292f76be5bae5" gracePeriod=30 Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.918258 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.930364 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.930773 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="da681308-73a5-4c6c-a1db-c2bf0dde126c" containerName="nova-metadata-log" containerID="cri-o://5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421" gracePeriod=30 Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.931354 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="da681308-73a5-4c6c-a1db-c2bf0dde126c" containerName="nova-metadata-metadata" containerID="cri-o://606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680" gracePeriod=30 Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.975301 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-ovsdbserver-sb\") pod \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.975697 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-dns-swift-storage-0\") pod \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.975751 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5tqq\" (UniqueName: \"kubernetes.io/projected/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-kube-api-access-g5tqq\") pod \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.975826 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-ovsdbserver-nb\") pod \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.975909 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-config\") pod \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.976038 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-dns-svc\") pod \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\" (UID: \"649fef5f-2881-49d4-8bf4-1cf8e93a87b3\") " Jan 05 22:13:49 crc kubenswrapper[4910]: I0105 22:13:49.981328 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-kube-api-access-g5tqq" (OuterVolumeSpecName: "kube-api-access-g5tqq") pod "649fef5f-2881-49d4-8bf4-1cf8e93a87b3" (UID: "649fef5f-2881-49d4-8bf4-1cf8e93a87b3"). InnerVolumeSpecName "kube-api-access-g5tqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.049990 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "649fef5f-2881-49d4-8bf4-1cf8e93a87b3" (UID: "649fef5f-2881-49d4-8bf4-1cf8e93a87b3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.059696 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-config" (OuterVolumeSpecName: "config") pod "649fef5f-2881-49d4-8bf4-1cf8e93a87b3" (UID: "649fef5f-2881-49d4-8bf4-1cf8e93a87b3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.059721 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "649fef5f-2881-49d4-8bf4-1cf8e93a87b3" (UID: "649fef5f-2881-49d4-8bf4-1cf8e93a87b3"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.059747 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "649fef5f-2881-49d4-8bf4-1cf8e93a87b3" (UID: "649fef5f-2881-49d4-8bf4-1cf8e93a87b3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.068965 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "649fef5f-2881-49d4-8bf4-1cf8e93a87b3" (UID: "649fef5f-2881-49d4-8bf4-1cf8e93a87b3"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.077874 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.077929 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.077940 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.077953 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.077963 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5tqq\" (UniqueName: \"kubernetes.io/projected/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-kube-api-access-g5tqq\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.077974 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/649fef5f-2881-49d4-8bf4-1cf8e93a87b3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:50 crc kubenswrapper[4910]: E0105 22:13:50.420175 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda681308_73a5_4c6c_a1db_c2bf0dde126c.slice/crio-conmon-606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680.scope\": RecentStats: unable to find data in memory cache]" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.642214 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.727884 4910 generic.go:334] "Generic (PLEG): container finished" podID="da681308-73a5-4c6c-a1db-c2bf0dde126c" containerID="606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680" exitCode=0 Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.727920 4910 generic.go:334] "Generic (PLEG): container finished" podID="da681308-73a5-4c6c-a1db-c2bf0dde126c" containerID="5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421" exitCode=143 Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.728026 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.736630 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.739829 4910 generic.go:334] "Generic (PLEG): container finished" podID="55390b37-0abf-4520-ae3e-6f361a2b3f17" containerID="cec591e146b576f40e19764d655ead555ad982542f05b667990292f76be5bae5" exitCode=143 Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.740035 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3" containerName="nova-scheduler-scheduler" containerID="cri-o://97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d" gracePeriod=30 Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.745412 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da681308-73a5-4c6c-a1db-c2bf0dde126c","Type":"ContainerDied","Data":"606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680"} Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.745448 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da681308-73a5-4c6c-a1db-c2bf0dde126c","Type":"ContainerDied","Data":"5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421"} Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.745460 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da681308-73a5-4c6c-a1db-c2bf0dde126c","Type":"ContainerDied","Data":"cbad3fa2b1dba88c1bc07e75fbbfda000be45793b586e386a7f002889e3171e0"} Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.745471 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-tddh2" event={"ID":"649fef5f-2881-49d4-8bf4-1cf8e93a87b3","Type":"ContainerDied","Data":"3bd0be7e791fc8e3a16907f1479cb022653ddc9358e780336f8ca8f0a5fa95d5"} Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.745483 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"55390b37-0abf-4520-ae3e-6f361a2b3f17","Type":"ContainerDied","Data":"cec591e146b576f40e19764d655ead555ad982542f05b667990292f76be5bae5"} Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.745503 4910 scope.go:117] "RemoveContainer" containerID="606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.786523 4910 scope.go:117] "RemoveContainer" containerID="5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.793494 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-config-data\") pod \"da681308-73a5-4c6c-a1db-c2bf0dde126c\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.793593 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f247k\" (UniqueName: \"kubernetes.io/projected/da681308-73a5-4c6c-a1db-c2bf0dde126c-kube-api-access-f247k\") pod \"da681308-73a5-4c6c-a1db-c2bf0dde126c\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.793788 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da681308-73a5-4c6c-a1db-c2bf0dde126c-logs\") pod \"da681308-73a5-4c6c-a1db-c2bf0dde126c\" (UID: 
\"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.793827 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-combined-ca-bundle\") pod \"da681308-73a5-4c6c-a1db-c2bf0dde126c\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.793914 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-nova-metadata-tls-certs\") pod \"da681308-73a5-4c6c-a1db-c2bf0dde126c\" (UID: \"da681308-73a5-4c6c-a1db-c2bf0dde126c\") " Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.798104 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-tddh2"] Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.799803 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da681308-73a5-4c6c-a1db-c2bf0dde126c-logs" (OuterVolumeSpecName: "logs") pod "da681308-73a5-4c6c-a1db-c2bf0dde126c" (UID: "da681308-73a5-4c6c-a1db-c2bf0dde126c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.816607 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da681308-73a5-4c6c-a1db-c2bf0dde126c-kube-api-access-f247k" (OuterVolumeSpecName: "kube-api-access-f247k") pod "da681308-73a5-4c6c-a1db-c2bf0dde126c" (UID: "da681308-73a5-4c6c-a1db-c2bf0dde126c"). InnerVolumeSpecName "kube-api-access-f247k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.819815 4910 scope.go:117] "RemoveContainer" containerID="606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680" Jan 05 22:13:50 crc kubenswrapper[4910]: E0105 22:13:50.822330 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680\": container with ID starting with 606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680 not found: ID does not exist" containerID="606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.822376 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680"} err="failed to get container status \"606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680\": rpc error: code = NotFound desc = could not find container \"606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680\": container with ID starting with 606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680 not found: ID does not exist" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.822407 4910 scope.go:117] "RemoveContainer" containerID="5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421" Jan 05 22:13:50 crc kubenswrapper[4910]: E0105 22:13:50.822694 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421\": container with ID starting with 
5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421 not found: ID does not exist" containerID="5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.822724 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421"} err="failed to get container status \"5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421\": rpc error: code = NotFound desc = could not find container \"5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421\": container with ID starting with 5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421 not found: ID does not exist" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.822741 4910 scope.go:117] "RemoveContainer" containerID="606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.822970 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680"} err="failed to get container status \"606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680\": rpc error: code = NotFound desc = could not find container \"606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680\": container with ID starting with 606e6f2e3497ecb01d14072492d8281fecb70b6d64f8c39aa3b524fbcf3ac680 not found: ID does not exist" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.822994 4910 scope.go:117] "RemoveContainer" containerID="5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.823260 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421"} err="failed to get container status \"5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421\": rpc error: code = NotFound desc = could not find container \"5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421\": container with ID starting with 5bd015bbe2da84b1fc80cbd102483c8e213499eb74b2b27c295d59d34ab08421 not found: ID does not exist" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.823283 4910 scope.go:117] "RemoveContainer" containerID="35bc838138999a85ba33aa625ee381aff61ed72cb60e68c0f63973fd33ed2d44" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.826378 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-config-data" (OuterVolumeSpecName: "config-data") pod "da681308-73a5-4c6c-a1db-c2bf0dde126c" (UID: "da681308-73a5-4c6c-a1db-c2bf0dde126c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.843768 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da681308-73a5-4c6c-a1db-c2bf0dde126c" (UID: "da681308-73a5-4c6c-a1db-c2bf0dde126c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.849873 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-tddh2"] Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.881933 4910 scope.go:117] "RemoveContainer" containerID="ca55847c5a90177cacabbb3c972e4287be7959ac0ac8664ef9f61c1b46b3dc56" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.897195 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.897225 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f247k\" (UniqueName: \"kubernetes.io/projected/da681308-73a5-4c6c-a1db-c2bf0dde126c-kube-api-access-f247k\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.897234 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da681308-73a5-4c6c-a1db-c2bf0dde126c-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.897242 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.899042 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "da681308-73a5-4c6c-a1db-c2bf0dde126c" (UID: "da681308-73a5-4c6c-a1db-c2bf0dde126c"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:50 crc kubenswrapper[4910]: I0105 22:13:50.999491 4910 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/da681308-73a5-4c6c-a1db-c2bf0dde126c-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.073324 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.087775 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.104384 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:51 crc kubenswrapper[4910]: E0105 22:13:51.104882 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da681308-73a5-4c6c-a1db-c2bf0dde126c" containerName="nova-metadata-metadata" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.104903 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="da681308-73a5-4c6c-a1db-c2bf0dde126c" containerName="nova-metadata-metadata" Jan 05 22:13:51 crc kubenswrapper[4910]: E0105 22:13:51.104919 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44f006eb-f848-4351-914e-9a9e751194a3" containerName="nova-manage" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.104926 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="44f006eb-f848-4351-914e-9a9e751194a3" containerName="nova-manage" Jan 05 22:13:51 crc kubenswrapper[4910]: E0105 22:13:51.104951 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="649fef5f-2881-49d4-8bf4-1cf8e93a87b3" containerName="dnsmasq-dns" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.104957 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="649fef5f-2881-49d4-8bf4-1cf8e93a87b3" containerName="dnsmasq-dns" Jan 05 22:13:51 crc kubenswrapper[4910]: E0105 22:13:51.104980 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="649fef5f-2881-49d4-8bf4-1cf8e93a87b3" containerName="init" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.104986 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="649fef5f-2881-49d4-8bf4-1cf8e93a87b3" containerName="init" Jan 05 22:13:51 crc kubenswrapper[4910]: E0105 22:13:51.105003 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da681308-73a5-4c6c-a1db-c2bf0dde126c" containerName="nova-metadata-log" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.105011 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="da681308-73a5-4c6c-a1db-c2bf0dde126c" containerName="nova-metadata-log" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.105224 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="da681308-73a5-4c6c-a1db-c2bf0dde126c" containerName="nova-metadata-log" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.105243 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="44f006eb-f848-4351-914e-9a9e751194a3" containerName="nova-manage" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.105259 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="da681308-73a5-4c6c-a1db-c2bf0dde126c" containerName="nova-metadata-metadata" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.105273 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="649fef5f-2881-49d4-8bf4-1cf8e93a87b3" 
containerName="dnsmasq-dns" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.106289 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.108746 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.108946 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.116724 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.202660 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.202735 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-config-data\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.202813 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pk8h8\" (UniqueName: \"kubernetes.io/projected/640b0e1e-49a8-4daf-899f-c1a7ab82e976-kube-api-access-pk8h8\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.202834 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/640b0e1e-49a8-4daf-899f-c1a7ab82e976-logs\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.202863 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.304277 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pk8h8\" (UniqueName: \"kubernetes.io/projected/640b0e1e-49a8-4daf-899f-c1a7ab82e976-kube-api-access-pk8h8\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.304325 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/640b0e1e-49a8-4daf-899f-c1a7ab82e976-logs\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.304360 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.304418 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.304471 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-config-data\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.306257 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/640b0e1e-49a8-4daf-899f-c1a7ab82e976-logs\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.308513 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.308676 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.308864 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-config-data\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.326790 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pk8h8\" (UniqueName: \"kubernetes.io/projected/640b0e1e-49a8-4daf-899f-c1a7ab82e976-kube-api-access-pk8h8\") pod \"nova-metadata-0\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.500444 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.759941 4910 generic.go:334] "Generic (PLEG): container finished" podID="e520b140-ba86-4e17-82d2-4e8c4dc15474" containerID="5e825dd38b907536857a97445f1e68ba17937cbaba208d22383b08c67caa5ac5" exitCode=0 Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.760347 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-gqjqz" event={"ID":"e520b140-ba86-4e17-82d2-4e8c4dc15474","Type":"ContainerDied","Data":"5e825dd38b907536857a97445f1e68ba17937cbaba208d22383b08c67caa5ac5"} Jan 05 22:13:51 crc kubenswrapper[4910]: I0105 22:13:51.962573 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:13:52 crc kubenswrapper[4910]: I0105 22:13:52.733320 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="649fef5f-2881-49d4-8bf4-1cf8e93a87b3" path="/var/lib/kubelet/pods/649fef5f-2881-49d4-8bf4-1cf8e93a87b3/volumes" Jan 05 22:13:52 crc kubenswrapper[4910]: I0105 22:13:52.734564 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da681308-73a5-4c6c-a1db-c2bf0dde126c" path="/var/lib/kubelet/pods/da681308-73a5-4c6c-a1db-c2bf0dde126c/volumes" Jan 05 22:13:52 crc kubenswrapper[4910]: I0105 22:13:52.771997 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"640b0e1e-49a8-4daf-899f-c1a7ab82e976","Type":"ContainerStarted","Data":"05e89eb9ca56e3ebe59045f592314e58faf89eea75b4fe0a9ff2a77177a668a3"} Jan 05 22:13:52 crc kubenswrapper[4910]: I0105 22:13:52.772047 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"640b0e1e-49a8-4daf-899f-c1a7ab82e976","Type":"ContainerStarted","Data":"e752583795fe468de9af6eb52c315e5c49f57548b9b236bdb28a3bb75692ebb7"} Jan 05 22:13:52 crc kubenswrapper[4910]: I0105 22:13:52.772057 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"640b0e1e-49a8-4daf-899f-c1a7ab82e976","Type":"ContainerStarted","Data":"d82880f81a4b6c5266f95426d7d47847b24bcd4ad74b4f499b037e34e179b45a"} Jan 05 22:13:52 crc kubenswrapper[4910]: I0105 22:13:52.800567 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.800538194 podStartE2EDuration="1.800538194s" podCreationTimestamp="2026-01-05 22:13:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:13:52.797903879 +0000 UTC m=+1364.375401549" watchObservedRunningTime="2026-01-05 22:13:52.800538194 +0000 UTC m=+1364.378035874" Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.093254 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-gqjqz" Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.142825 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-scripts\") pod \"e520b140-ba86-4e17-82d2-4e8c4dc15474\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.143075 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-combined-ca-bundle\") pod \"e520b140-ba86-4e17-82d2-4e8c4dc15474\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.143146 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wtldk\" (UniqueName: \"kubernetes.io/projected/e520b140-ba86-4e17-82d2-4e8c4dc15474-kube-api-access-wtldk\") pod \"e520b140-ba86-4e17-82d2-4e8c4dc15474\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.143187 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-config-data\") pod \"e520b140-ba86-4e17-82d2-4e8c4dc15474\" (UID: \"e520b140-ba86-4e17-82d2-4e8c4dc15474\") " Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.150964 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e520b140-ba86-4e17-82d2-4e8c4dc15474-kube-api-access-wtldk" (OuterVolumeSpecName: "kube-api-access-wtldk") pod "e520b140-ba86-4e17-82d2-4e8c4dc15474" (UID: "e520b140-ba86-4e17-82d2-4e8c4dc15474"). InnerVolumeSpecName "kube-api-access-wtldk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.151066 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-scripts" (OuterVolumeSpecName: "scripts") pod "e520b140-ba86-4e17-82d2-4e8c4dc15474" (UID: "e520b140-ba86-4e17-82d2-4e8c4dc15474"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.180001 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-config-data" (OuterVolumeSpecName: "config-data") pod "e520b140-ba86-4e17-82d2-4e8c4dc15474" (UID: "e520b140-ba86-4e17-82d2-4e8c4dc15474"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.195608 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e520b140-ba86-4e17-82d2-4e8c4dc15474" (UID: "e520b140-ba86-4e17-82d2-4e8c4dc15474"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.245441 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.245496 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wtldk\" (UniqueName: \"kubernetes.io/projected/e520b140-ba86-4e17-82d2-4e8c4dc15474-kube-api-access-wtldk\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.245508 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.245516 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e520b140-ba86-4e17-82d2-4e8c4dc15474-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:53 crc kubenswrapper[4910]: E0105 22:13:53.273769 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 22:13:53 crc kubenswrapper[4910]: E0105 22:13:53.275585 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 22:13:53 crc kubenswrapper[4910]: E0105 22:13:53.277028 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 22:13:53 crc kubenswrapper[4910]: E0105 22:13:53.277073 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3" containerName="nova-scheduler-scheduler" Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.786862 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-gqjqz"
Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.791449 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-gqjqz" event={"ID":"e520b140-ba86-4e17-82d2-4e8c4dc15474","Type":"ContainerDied","Data":"ba23b6f09b27fd72fcb26a9a07fd7faa5fd559ec7a4373b9676bb3b4cc71928c"}
Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.791509 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba23b6f09b27fd72fcb26a9a07fd7faa5fd559ec7a4373b9676bb3b4cc71928c"
Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.852779 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 05 22:13:53 crc kubenswrapper[4910]: E0105 22:13:53.853329 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e520b140-ba86-4e17-82d2-4e8c4dc15474" containerName="nova-cell1-conductor-db-sync"
Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.853344 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e520b140-ba86-4e17-82d2-4e8c4dc15474" containerName="nova-cell1-conductor-db-sync"
Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.853576 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e520b140-ba86-4e17-82d2-4e8c4dc15474" containerName="nova-cell1-conductor-db-sync"
Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.854285 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.858410 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.864159 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.958803 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70694d65-fa64-4667-b1aa-bac50650687c-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"70694d65-fa64-4667-b1aa-bac50650687c\") " pod="openstack/nova-cell1-conductor-0"
Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.959171 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxsxb\" (UniqueName: \"kubernetes.io/projected/70694d65-fa64-4667-b1aa-bac50650687c-kube-api-access-vxsxb\") pod \"nova-cell1-conductor-0\" (UID: \"70694d65-fa64-4667-b1aa-bac50650687c\") " pod="openstack/nova-cell1-conductor-0"
Jan 05 22:13:53 crc kubenswrapper[4910]: I0105 22:13:53.959345 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70694d65-fa64-4667-b1aa-bac50650687c-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"70694d65-fa64-4667-b1aa-bac50650687c\") " pod="openstack/nova-cell1-conductor-0"
Jan 05 22:13:54 crc kubenswrapper[4910]: I0105 22:13:54.061959 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxsxb\" (UniqueName: \"kubernetes.io/projected/70694d65-fa64-4667-b1aa-bac50650687c-kube-api-access-vxsxb\") pod \"nova-cell1-conductor-0\" (UID: \"70694d65-fa64-4667-b1aa-bac50650687c\") " pod="openstack/nova-cell1-conductor-0"
Jan 05 22:13:54 crc kubenswrapper[4910]: I0105 22:13:54.062056 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70694d65-fa64-4667-b1aa-bac50650687c-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"70694d65-fa64-4667-b1aa-bac50650687c\") " pod="openstack/nova-cell1-conductor-0"
Jan 05 22:13:54 crc kubenswrapper[4910]: I0105 22:13:54.062152 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70694d65-fa64-4667-b1aa-bac50650687c-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"70694d65-fa64-4667-b1aa-bac50650687c\") " pod="openstack/nova-cell1-conductor-0"
Jan 05 22:13:54 crc kubenswrapper[4910]: I0105 22:13:54.065641 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70694d65-fa64-4667-b1aa-bac50650687c-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"70694d65-fa64-4667-b1aa-bac50650687c\") " pod="openstack/nova-cell1-conductor-0"
Jan 05 22:13:54 crc kubenswrapper[4910]: I0105 22:13:54.066504 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70694d65-fa64-4667-b1aa-bac50650687c-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"70694d65-fa64-4667-b1aa-bac50650687c\") " pod="openstack/nova-cell1-conductor-0"
Jan 05 22:13:54 crc kubenswrapper[4910]: I0105 22:13:54.079211 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxsxb\" (UniqueName: \"kubernetes.io/projected/70694d65-fa64-4667-b1aa-bac50650687c-kube-api-access-vxsxb\") pod \"nova-cell1-conductor-0\" (UID: \"70694d65-fa64-4667-b1aa-bac50650687c\") " pod="openstack/nova-cell1-conductor-0"
Jan 05 22:13:54 crc kubenswrapper[4910]: I0105 22:13:54.174309 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Jan 05 22:13:54 crc kubenswrapper[4910]: I0105 22:13:54.601869 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 05 22:13:54 crc kubenswrapper[4910]: I0105 22:13:54.817402 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"70694d65-fa64-4667-b1aa-bac50650687c","Type":"ContainerStarted","Data":"efabaebeda908dacb3dd8334719822a571f4a923ae770b61178621243975e318"}
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.799844 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.805678 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.879155 4910 generic.go:334] "Generic (PLEG): container finished" podID="3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3" containerID="97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d" exitCode=0
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.881457 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.881446 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3","Type":"ContainerDied","Data":"97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d"}
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.882427 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3","Type":"ContainerDied","Data":"6f0e5856611e59e3566f5a3fda02c37e2a094979061f82b9b910493d7a638dc5"}
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.882457 4910 scope.go:117] "RemoveContainer" containerID="97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d"
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.889537 4910 generic.go:334] "Generic (PLEG): container finished" podID="55390b37-0abf-4520-ae3e-6f361a2b3f17" containerID="80e6da2bb90d9c3d1a96d9dce6a11be58fb7c39b2ab143358753f2b371ac141e" exitCode=0
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.889806 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"55390b37-0abf-4520-ae3e-6f361a2b3f17","Type":"ContainerDied","Data":"80e6da2bb90d9c3d1a96d9dce6a11be58fb7c39b2ab143358753f2b371ac141e"}
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.889860 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"55390b37-0abf-4520-ae3e-6f361a2b3f17","Type":"ContainerDied","Data":"b29a3e898bf668bba013aa653d49a865c0feec0c78c0a7944b22b86a2667c459"}
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.889836 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.893484 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"70694d65-fa64-4667-b1aa-bac50650687c","Type":"ContainerStarted","Data":"708b16276678b2822ae86c9c52e58e344dbcf830fd5f034e5d7cb53f881b9997"}
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.894995 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.914397 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-config-data\") pod \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\" (UID: \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\") "
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.914484 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55390b37-0abf-4520-ae3e-6f361a2b3f17-logs\") pod \"55390b37-0abf-4520-ae3e-6f361a2b3f17\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") "
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.914513 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55390b37-0abf-4520-ae3e-6f361a2b3f17-combined-ca-bundle\") pod \"55390b37-0abf-4520-ae3e-6f361a2b3f17\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") "
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.914563 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r274j\" (UniqueName: \"kubernetes.io/projected/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-kube-api-access-r274j\") pod \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\" (UID: \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\") "
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.914623 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-combined-ca-bundle\") pod \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\" (UID: \"3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3\") "
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.914877 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55390b37-0abf-4520-ae3e-6f361a2b3f17-config-data\") pod \"55390b37-0abf-4520-ae3e-6f361a2b3f17\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") "
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.914916 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zh5gz\" (UniqueName: \"kubernetes.io/projected/55390b37-0abf-4520-ae3e-6f361a2b3f17-kube-api-access-zh5gz\") pod \"55390b37-0abf-4520-ae3e-6f361a2b3f17\" (UID: \"55390b37-0abf-4520-ae3e-6f361a2b3f17\") "
Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.918149 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55390b37-0abf-4520-ae3e-6f361a2b3f17-logs" (OuterVolumeSpecName: "logs") pod "55390b37-0abf-4520-ae3e-6f361a2b3f17" (UID: "55390b37-0abf-4520-ae3e-6f361a2b3f17"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.923346 4910 scope.go:117] "RemoveContainer" containerID="97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d" Jan 05 22:13:55 crc kubenswrapper[4910]: E0105 22:13:55.924418 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d\": container with ID starting with 97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d not found: ID does not exist" containerID="97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d" Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.924546 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d"} err="failed to get container status \"97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d\": rpc error: code = NotFound desc = could not find container \"97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d\": container with ID starting with 97a996baccc5a9fdf261faac05af6a9d922ac833eb10d27ac2a7070fc4f4745d not found: ID does not exist" Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.924697 4910 scope.go:117] "RemoveContainer" containerID="80e6da2bb90d9c3d1a96d9dce6a11be58fb7c39b2ab143358753f2b371ac141e" Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.927608 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.927585252 podStartE2EDuration="2.927585252s" podCreationTimestamp="2026-01-05 22:13:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:13:55.925133872 +0000 UTC m=+1367.502631542" watchObservedRunningTime="2026-01-05 22:13:55.927585252 +0000 UTC m=+1367.505082942" Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.927875 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-kube-api-access-r274j" (OuterVolumeSpecName: "kube-api-access-r274j") pod "3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3" (UID: "3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3"). InnerVolumeSpecName "kube-api-access-r274j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.931394 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55390b37-0abf-4520-ae3e-6f361a2b3f17-kube-api-access-zh5gz" (OuterVolumeSpecName: "kube-api-access-zh5gz") pod "55390b37-0abf-4520-ae3e-6f361a2b3f17" (UID: "55390b37-0abf-4520-ae3e-6f361a2b3f17"). InnerVolumeSpecName "kube-api-access-zh5gz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.952790 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55390b37-0abf-4520-ae3e-6f361a2b3f17-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55390b37-0abf-4520-ae3e-6f361a2b3f17" (UID: "55390b37-0abf-4520-ae3e-6f361a2b3f17"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.958700 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-config-data" (OuterVolumeSpecName: "config-data") pod "3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3" (UID: "3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.959022 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55390b37-0abf-4520-ae3e-6f361a2b3f17-config-data" (OuterVolumeSpecName: "config-data") pod "55390b37-0abf-4520-ae3e-6f361a2b3f17" (UID: "55390b37-0abf-4520-ae3e-6f361a2b3f17"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.959250 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3" (UID: "3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:13:55 crc kubenswrapper[4910]: I0105 22:13:55.994649 4910 scope.go:117] "RemoveContainer" containerID="cec591e146b576f40e19764d655ead555ad982542f05b667990292f76be5bae5" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.017506 4910 scope.go:117] "RemoveContainer" containerID="80e6da2bb90d9c3d1a96d9dce6a11be58fb7c39b2ab143358753f2b371ac141e" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.019010 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55390b37-0abf-4520-ae3e-6f361a2b3f17-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.019036 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zh5gz\" (UniqueName: \"kubernetes.io/projected/55390b37-0abf-4520-ae3e-6f361a2b3f17-kube-api-access-zh5gz\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.019052 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.019064 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55390b37-0abf-4520-ae3e-6f361a2b3f17-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.019075 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55390b37-0abf-4520-ae3e-6f361a2b3f17-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.019088 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r274j\" (UniqueName: \"kubernetes.io/projected/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-kube-api-access-r274j\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.019100 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:13:56 crc kubenswrapper[4910]: E0105 22:13:56.020472 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80e6da2bb90d9c3d1a96d9dce6a11be58fb7c39b2ab143358753f2b371ac141e\": container with ID starting with 80e6da2bb90d9c3d1a96d9dce6a11be58fb7c39b2ab143358753f2b371ac141e not found: ID does not exist" containerID="80e6da2bb90d9c3d1a96d9dce6a11be58fb7c39b2ab143358753f2b371ac141e" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.020520 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80e6da2bb90d9c3d1a96d9dce6a11be58fb7c39b2ab143358753f2b371ac141e"} err="failed to get container status \"80e6da2bb90d9c3d1a96d9dce6a11be58fb7c39b2ab143358753f2b371ac141e\": rpc error: code = NotFound desc = could not find container \"80e6da2bb90d9c3d1a96d9dce6a11be58fb7c39b2ab143358753f2b371ac141e\": container with ID starting with 80e6da2bb90d9c3d1a96d9dce6a11be58fb7c39b2ab143358753f2b371ac141e not found: ID does not exist" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.020556 4910 scope.go:117] "RemoveContainer" containerID="cec591e146b576f40e19764d655ead555ad982542f05b667990292f76be5bae5" Jan 05 22:13:56 crc kubenswrapper[4910]: E0105 22:13:56.021289 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cec591e146b576f40e19764d655ead555ad982542f05b667990292f76be5bae5\": container with ID starting with cec591e146b576f40e19764d655ead555ad982542f05b667990292f76be5bae5 not found: ID does not exist" containerID="cec591e146b576f40e19764d655ead555ad982542f05b667990292f76be5bae5" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.021370 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cec591e146b576f40e19764d655ead555ad982542f05b667990292f76be5bae5"} err="failed to get container status \"cec591e146b576f40e19764d655ead555ad982542f05b667990292f76be5bae5\": rpc error: code = NotFound desc = could not find container \"cec591e146b576f40e19764d655ead555ad982542f05b667990292f76be5bae5\": container with ID starting with cec591e146b576f40e19764d655ead555ad982542f05b667990292f76be5bae5 not found: ID does not exist" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.223923 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.239079 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.251697 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.266262 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.280583 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:13:56 crc kubenswrapper[4910]: E0105 22:13:56.281168 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55390b37-0abf-4520-ae3e-6f361a2b3f17" containerName="nova-api-log" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.281194 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="55390b37-0abf-4520-ae3e-6f361a2b3f17" containerName="nova-api-log" Jan 05 
22:13:56 crc kubenswrapper[4910]: E0105 22:13:56.281227 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55390b37-0abf-4520-ae3e-6f361a2b3f17" containerName="nova-api-api" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.281237 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="55390b37-0abf-4520-ae3e-6f361a2b3f17" containerName="nova-api-api" Jan 05 22:13:56 crc kubenswrapper[4910]: E0105 22:13:56.281251 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3" containerName="nova-scheduler-scheduler" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.281259 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3" containerName="nova-scheduler-scheduler" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.292637 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3" containerName="nova-scheduler-scheduler" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.292707 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="55390b37-0abf-4520-ae3e-6f361a2b3f17" containerName="nova-api-api" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.292730 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="55390b37-0abf-4520-ae3e-6f361a2b3f17" containerName="nova-api-log" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.293868 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.294033 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.300381 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.303565 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.305483 4910 util.go:30] "No sandbox for pod can be found. 
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.317217 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.317594 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.325341 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b6624df-a935-4a61-8bff-0033ae391bbe-config-data\") pod \"nova-api-0\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " pod="openstack/nova-api-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.325399 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b6624df-a935-4a61-8bff-0033ae391bbe-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " pod="openstack/nova-api-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.325760 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\") " pod="openstack/nova-scheduler-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.325797 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6w6p\" (UniqueName: \"kubernetes.io/projected/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-kube-api-access-k6w6p\") pod \"nova-scheduler-0\" (UID: \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\") " pod="openstack/nova-scheduler-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.325872 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b6624df-a935-4a61-8bff-0033ae391bbe-logs\") pod \"nova-api-0\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " pod="openstack/nova-api-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.325931 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jj7wb\" (UniqueName: \"kubernetes.io/projected/5b6624df-a935-4a61-8bff-0033ae391bbe-kube-api-access-jj7wb\") pod \"nova-api-0\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " pod="openstack/nova-api-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.325966 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-config-data\") pod \"nova-scheduler-0\" (UID: \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\") " pod="openstack/nova-scheduler-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.428412 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\") " pod="openstack/nova-scheduler-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.428707 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6w6p\" (UniqueName: \"kubernetes.io/projected/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-kube-api-access-k6w6p\") pod \"nova-scheduler-0\" (UID: \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\") " pod="openstack/nova-scheduler-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.428879 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b6624df-a935-4a61-8bff-0033ae391bbe-logs\") pod \"nova-api-0\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " pod="openstack/nova-api-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.429002 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jj7wb\" (UniqueName: \"kubernetes.io/projected/5b6624df-a935-4a61-8bff-0033ae391bbe-kube-api-access-jj7wb\") pod \"nova-api-0\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " pod="openstack/nova-api-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.429140 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-config-data\") pod \"nova-scheduler-0\" (UID: \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\") " pod="openstack/nova-scheduler-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.429355 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b6624df-a935-4a61-8bff-0033ae391bbe-config-data\") pod \"nova-api-0\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " pod="openstack/nova-api-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.429472 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b6624df-a935-4a61-8bff-0033ae391bbe-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " pod="openstack/nova-api-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.429485 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b6624df-a935-4a61-8bff-0033ae391bbe-logs\") pod \"nova-api-0\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " pod="openstack/nova-api-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.433696 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b6624df-a935-4a61-8bff-0033ae391bbe-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " pod="openstack/nova-api-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.433933 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-config-data\") pod \"nova-scheduler-0\" (UID: \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\") " pod="openstack/nova-scheduler-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.434061 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\") " pod="openstack/nova-scheduler-0"
Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.440632 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b6624df-a935-4a61-8bff-0033ae391bbe-config-data\") pod \"nova-api-0\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " pod="openstack/nova-api-0"
\"kubernetes.io/secret/5b6624df-a935-4a61-8bff-0033ae391bbe-config-data\") pod \"nova-api-0\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " pod="openstack/nova-api-0" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.449797 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jj7wb\" (UniqueName: \"kubernetes.io/projected/5b6624df-a935-4a61-8bff-0033ae391bbe-kube-api-access-jj7wb\") pod \"nova-api-0\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " pod="openstack/nova-api-0" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.460063 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6w6p\" (UniqueName: \"kubernetes.io/projected/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-kube-api-access-k6w6p\") pod \"nova-scheduler-0\" (UID: \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\") " pod="openstack/nova-scheduler-0" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.501632 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.501708 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.626073 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.638528 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.738558 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3" path="/var/lib/kubelet/pods/3ce21ad1-6aaf-494d-b4ef-7e99f4b31ff3/volumes" Jan 05 22:13:56 crc kubenswrapper[4910]: I0105 22:13:56.740152 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55390b37-0abf-4520-ae3e-6f361a2b3f17" path="/var/lib/kubelet/pods/55390b37-0abf-4520-ae3e-6f361a2b3f17/volumes" Jan 05 22:13:57 crc kubenswrapper[4910]: I0105 22:13:57.181814 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:13:57 crc kubenswrapper[4910]: I0105 22:13:57.313144 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:13:57 crc kubenswrapper[4910]: I0105 22:13:57.923444 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5b6624df-a935-4a61-8bff-0033ae391bbe","Type":"ContainerStarted","Data":"7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d"} Jan 05 22:13:57 crc kubenswrapper[4910]: I0105 22:13:57.923959 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5b6624df-a935-4a61-8bff-0033ae391bbe","Type":"ContainerStarted","Data":"2b28673a467f6a655658de03e18c731e41f3424a3b2ca4ef2d62673073a73116"} Jan 05 22:13:57 crc kubenswrapper[4910]: I0105 22:13:57.925164 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c","Type":"ContainerStarted","Data":"10869218f49d1497aa8b4413fabc4bce2443981d8eca4525be5a59927d56345d"} Jan 05 22:13:57 crc kubenswrapper[4910]: I0105 22:13:57.925282 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c","Type":"ContainerStarted","Data":"0c263c31e48774f2eb04c9c74b29d692587996bd7b3a0e1601d8ae8201c3b616"} Jan 05 22:13:58 crc kubenswrapper[4910]: I0105 22:13:58.961871 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.961845228 podStartE2EDuration="2.961845228s" podCreationTimestamp="2026-01-05 22:13:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:13:58.954738691 +0000 UTC m=+1370.532236381" watchObservedRunningTime="2026-01-05 22:13:58.961845228 +0000 UTC m=+1370.539342908" Jan 05 22:13:59 crc kubenswrapper[4910]: I0105 22:13:59.223883 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 05 22:13:59 crc kubenswrapper[4910]: I0105 22:13:59.945969 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5b6624df-a935-4a61-8bff-0033ae391bbe","Type":"ContainerStarted","Data":"12414d2a86b90ebb57ff3d6681b6e068c983f1c382ed067e604262d5fc4535e8"} Jan 05 22:13:59 crc kubenswrapper[4910]: I0105 22:13:59.966042 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.966022081 podStartE2EDuration="3.966022081s" podCreationTimestamp="2026-01-05 22:13:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:13:59.961491928 +0000 UTC m=+1371.538989588" watchObservedRunningTime="2026-01-05 22:13:59.966022081 +0000 UTC m=+1371.543519751" Jan 05 22:14:01 crc kubenswrapper[4910]: I0105 22:14:01.501562 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 05 22:14:01 crc kubenswrapper[4910]: I0105 22:14:01.501640 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 05 22:14:01 crc kubenswrapper[4910]: I0105 22:14:01.627256 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 05 22:14:02 crc kubenswrapper[4910]: I0105 22:14:02.514314 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 05 22:14:02 crc kubenswrapper[4910]: I0105 22:14:02.514320 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 05 22:14:06 crc kubenswrapper[4910]: I0105 22:14:06.627341 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 05 22:14:06 crc kubenswrapper[4910]: I0105 22:14:06.639485 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 05 22:14:06 crc kubenswrapper[4910]: I0105 22:14:06.639554 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 05 22:14:06 crc kubenswrapper[4910]: I0105 
Jan 05 22:14:07 crc kubenswrapper[4910]: I0105 22:14:07.040428 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Jan 05 22:14:07 crc kubenswrapper[4910]: I0105 22:14:07.681413 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5b6624df-a935-4a61-8bff-0033ae391bbe" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 05 22:14:07 crc kubenswrapper[4910]: I0105 22:14:07.681475 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5b6624df-a935-4a61-8bff-0033ae391bbe" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 05 22:14:10 crc kubenswrapper[4910]: I0105 22:14:10.320226 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Jan 05 22:14:11 crc kubenswrapper[4910]: I0105 22:14:11.508321 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 05 22:14:11 crc kubenswrapper[4910]: I0105 22:14:11.511189 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 05 22:14:11 crc kubenswrapper[4910]: I0105 22:14:11.515484 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 05 22:14:12 crc kubenswrapper[4910]: I0105 22:14:12.080772 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 05 22:14:14 crc kubenswrapper[4910]: I0105 22:14:14.093819 4910 generic.go:334] "Generic (PLEG): container finished" podID="a57cd3fa-1d29-4a8c-a85f-2735b92640a7" containerID="7851b7975bed40518031113c597026bfbf2f70db38691075fd3f8e3c99a934cf" exitCode=137
Jan 05 22:14:14 crc kubenswrapper[4910]: I0105 22:14:14.093928 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a57cd3fa-1d29-4a8c-a85f-2735b92640a7","Type":"ContainerDied","Data":"7851b7975bed40518031113c597026bfbf2f70db38691075fd3f8e3c99a934cf"}
Jan 05 22:14:14 crc kubenswrapper[4910]: I0105 22:14:14.515568 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 05 22:14:14 crc kubenswrapper[4910]: I0105 22:14:14.613004 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-config-data\") pod \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\" (UID: \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\") "
Jan 05 22:14:14 crc kubenswrapper[4910]: I0105 22:14:14.613378 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqwz2\" (UniqueName: \"kubernetes.io/projected/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-kube-api-access-sqwz2\") pod \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\" (UID: \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\") "
Jan 05 22:14:14 crc kubenswrapper[4910]: I0105 22:14:14.613430 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-combined-ca-bundle\") pod \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\" (UID: \"a57cd3fa-1d29-4a8c-a85f-2735b92640a7\") "
Jan 05 22:14:14 crc kubenswrapper[4910]: I0105 22:14:14.622237 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-kube-api-access-sqwz2" (OuterVolumeSpecName: "kube-api-access-sqwz2") pod "a57cd3fa-1d29-4a8c-a85f-2735b92640a7" (UID: "a57cd3fa-1d29-4a8c-a85f-2735b92640a7"). InnerVolumeSpecName "kube-api-access-sqwz2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:14:14 crc kubenswrapper[4910]: I0105 22:14:14.651737 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a57cd3fa-1d29-4a8c-a85f-2735b92640a7" (UID: "a57cd3fa-1d29-4a8c-a85f-2735b92640a7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:14:14 crc kubenswrapper[4910]: I0105 22:14:14.651899 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-config-data" (OuterVolumeSpecName: "config-data") pod "a57cd3fa-1d29-4a8c-a85f-2735b92640a7" (UID: "a57cd3fa-1d29-4a8c-a85f-2735b92640a7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:14 crc kubenswrapper[4910]: I0105 22:14:14.716288 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:14 crc kubenswrapper[4910]: I0105 22:14:14.716744 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqwz2\" (UniqueName: \"kubernetes.io/projected/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-kube-api-access-sqwz2\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:14 crc kubenswrapper[4910]: I0105 22:14:14.716871 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a57cd3fa-1d29-4a8c-a85f-2735b92640a7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.109796 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a57cd3fa-1d29-4a8c-a85f-2735b92640a7","Type":"ContainerDied","Data":"b01e904f20b5a81df6b1d9a649f82674dbcc9788619f29a7d344dc5f7844d717"} Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.110263 4910 scope.go:117] "RemoveContainer" containerID="7851b7975bed40518031113c597026bfbf2f70db38691075fd3f8e3c99a934cf" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.109850 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.150447 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.167196 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.179482 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 22:14:15 crc kubenswrapper[4910]: E0105 22:14:15.180047 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a57cd3fa-1d29-4a8c-a85f-2735b92640a7" containerName="nova-cell1-novncproxy-novncproxy" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.180072 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a57cd3fa-1d29-4a8c-a85f-2735b92640a7" containerName="nova-cell1-novncproxy-novncproxy" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.180365 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a57cd3fa-1d29-4a8c-a85f-2735b92640a7" containerName="nova-cell1-novncproxy-novncproxy" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.181420 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.184999 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.185419 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.185720 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.205782 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.329431 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.329468 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wx72l\" (UniqueName: \"kubernetes.io/projected/da2a33ae-86a0-465d-a05e-89007e39e580-kube-api-access-wx72l\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.329592 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.329654 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.329728 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.431103 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.431228 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " 
pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.431247 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wx72l\" (UniqueName: \"kubernetes.io/projected/da2a33ae-86a0-465d-a05e-89007e39e580-kube-api-access-wx72l\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.431309 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.431342 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.436545 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.436651 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.438072 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.438781 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.450701 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wx72l\" (UniqueName: \"kubernetes.io/projected/da2a33ae-86a0-465d-a05e-89007e39e580-kube-api-access-wx72l\") pod \"nova-cell1-novncproxy-0\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:15 crc kubenswrapper[4910]: I0105 22:14:15.522190 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:16 crc kubenswrapper[4910]: I0105 22:14:16.048224 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 22:14:16 crc kubenswrapper[4910]: I0105 22:14:16.129493 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"da2a33ae-86a0-465d-a05e-89007e39e580","Type":"ContainerStarted","Data":"eb8afa201c5865f37d529095414b4f20fa5054a5e70dda0c19928897a48322b6"} Jan 05 22:14:16 crc kubenswrapper[4910]: I0105 22:14:16.644924 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 05 22:14:16 crc kubenswrapper[4910]: I0105 22:14:16.646096 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 05 22:14:16 crc kubenswrapper[4910]: I0105 22:14:16.648615 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 05 22:14:16 crc kubenswrapper[4910]: I0105 22:14:16.651257 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 05 22:14:16 crc kubenswrapper[4910]: I0105 22:14:16.741520 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a57cd3fa-1d29-4a8c-a85f-2735b92640a7" path="/var/lib/kubelet/pods/a57cd3fa-1d29-4a8c-a85f-2735b92640a7/volumes" Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.152920 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"da2a33ae-86a0-465d-a05e-89007e39e580","Type":"ContainerStarted","Data":"9508ef451ff0f7e73dc0cfea8eda8b03067704bfee4c29361c6f466617631e69"} Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.153020 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.172240 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.187654 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.187621596 podStartE2EDuration="2.187621596s" podCreationTimestamp="2026-01-05 22:14:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:14:17.174949191 +0000 UTC m=+1388.752446861" watchObservedRunningTime="2026-01-05 22:14:17.187621596 +0000 UTC m=+1388.765119266" Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.377366 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-pd68r"] Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.380084 4910 util.go:30] "No sandbox for pod can be found. 
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.401688 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-pd68r"]
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.482977 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-config\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.483028 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-ovsdbserver-sb\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.483071 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwx6h\" (UniqueName: \"kubernetes.io/projected/f55a0cf4-44d3-4896-911b-430d13f1f67e-kube-api-access-mwx6h\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.483272 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-dns-swift-storage-0\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.483327 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-ovsdbserver-nb\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.483360 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-dns-svc\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.585440 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-dns-swift-storage-0\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.585542 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-ovsdbserver-nb\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.585566 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-dns-svc\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.585675 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-config\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.585707 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-ovsdbserver-sb\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.585757 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwx6h\" (UniqueName: \"kubernetes.io/projected/f55a0cf4-44d3-4896-911b-430d13f1f67e-kube-api-access-mwx6h\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.599143 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-dns-svc\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.599881 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-config\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.600167 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-ovsdbserver-nb\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.600511 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-ovsdbserver-sb\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.602452 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-dns-swift-storage-0\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.627546 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwx6h\" (UniqueName: \"kubernetes.io/projected/f55a0cf4-44d3-4896-911b-430d13f1f67e-kube-api-access-mwx6h\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r"
\"kubernetes.io/projected/f55a0cf4-44d3-4896-911b-430d13f1f67e-kube-api-access-mwx6h\") pod \"dnsmasq-dns-867cd545c7-pd68r\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " pod="openstack/dnsmasq-dns-867cd545c7-pd68r" Jan 05 22:14:17 crc kubenswrapper[4910]: I0105 22:14:17.707593 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-867cd545c7-pd68r" Jan 05 22:14:18 crc kubenswrapper[4910]: I0105 22:14:18.326939 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-pd68r"] Jan 05 22:14:18 crc kubenswrapper[4910]: W0105 22:14:18.329707 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf55a0cf4_44d3_4896_911b_430d13f1f67e.slice/crio-071b7781d03e5612a0f9721e4576f1cb43f6a2b70be9745539d6632bf90c2e7f WatchSource:0}: Error finding container 071b7781d03e5612a0f9721e4576f1cb43f6a2b70be9745539d6632bf90c2e7f: Status 404 returned error can't find the container with id 071b7781d03e5612a0f9721e4576f1cb43f6a2b70be9745539d6632bf90c2e7f Jan 05 22:14:19 crc kubenswrapper[4910]: I0105 22:14:19.174134 4910 generic.go:334] "Generic (PLEG): container finished" podID="f55a0cf4-44d3-4896-911b-430d13f1f67e" containerID="92466a799d531b6baec996fb3d13141ee302474468214caed62ddb96fa5208ad" exitCode=0 Jan 05 22:14:19 crc kubenswrapper[4910]: I0105 22:14:19.174199 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-867cd545c7-pd68r" event={"ID":"f55a0cf4-44d3-4896-911b-430d13f1f67e","Type":"ContainerDied","Data":"92466a799d531b6baec996fb3d13141ee302474468214caed62ddb96fa5208ad"} Jan 05 22:14:19 crc kubenswrapper[4910]: I0105 22:14:19.174566 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-867cd545c7-pd68r" event={"ID":"f55a0cf4-44d3-4896-911b-430d13f1f67e","Type":"ContainerStarted","Data":"071b7781d03e5612a0f9721e4576f1cb43f6a2b70be9745539d6632bf90c2e7f"} Jan 05 22:14:20 crc kubenswrapper[4910]: I0105 22:14:20.214561 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-867cd545c7-pd68r" event={"ID":"f55a0cf4-44d3-4896-911b-430d13f1f67e","Type":"ContainerStarted","Data":"5d88d5d67f2af076e38a459d8f23e6f3dfd6d4cf06b6347db6a041118cb2daba"} Jan 05 22:14:20 crc kubenswrapper[4910]: I0105 22:14:20.215515 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-867cd545c7-pd68r" Jan 05 22:14:20 crc kubenswrapper[4910]: I0105 22:14:20.253457 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-867cd545c7-pd68r" podStartSLOduration=3.253418394 podStartE2EDuration="3.253418394s" podCreationTimestamp="2026-01-05 22:14:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:14:20.239492988 +0000 UTC m=+1391.816990678" watchObservedRunningTime="2026-01-05 22:14:20.253418394 +0000 UTC m=+1391.830916064" Jan 05 22:14:20 crc kubenswrapper[4910]: I0105 22:14:20.330193 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:14:20 crc kubenswrapper[4910]: I0105 22:14:20.330543 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="ceilometer-central-agent" containerID="cri-o://d093b482c421adc7d82da4245d88adc7b270620fd4852fc0d78a8373c23ea00e" 
gracePeriod=30 Jan 05 22:14:20 crc kubenswrapper[4910]: I0105 22:14:20.330699 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="proxy-httpd" containerID="cri-o://852ee4e47f911c7c2fcd4579b638cfe58eb083e3d449b414069d34e2e083e724" gracePeriod=30 Jan 05 22:14:20 crc kubenswrapper[4910]: I0105 22:14:20.330816 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="ceilometer-notification-agent" containerID="cri-o://999c2f7ca6118b09dbf9b8c9a92383cb7a09467572a0e86765bb0a727fb5a5d6" gracePeriod=30 Jan 05 22:14:20 crc kubenswrapper[4910]: I0105 22:14:20.330880 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="sg-core" containerID="cri-o://ef1d83a2d2057417c628c88f388bdfbf088c3937e36d5b1f7cbfa026594aa33d" gracePeriod=30 Jan 05 22:14:20 crc kubenswrapper[4910]: I0105 22:14:20.522844 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:21 crc kubenswrapper[4910]: I0105 22:14:21.141271 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:14:21 crc kubenswrapper[4910]: I0105 22:14:21.141544 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5b6624df-a935-4a61-8bff-0033ae391bbe" containerName="nova-api-log" containerID="cri-o://7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d" gracePeriod=30 Jan 05 22:14:21 crc kubenswrapper[4910]: I0105 22:14:21.141621 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5b6624df-a935-4a61-8bff-0033ae391bbe" containerName="nova-api-api" containerID="cri-o://12414d2a86b90ebb57ff3d6681b6e068c983f1c382ed067e604262d5fc4535e8" gracePeriod=30 Jan 05 22:14:21 crc kubenswrapper[4910]: I0105 22:14:21.227585 4910 generic.go:334] "Generic (PLEG): container finished" podID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerID="852ee4e47f911c7c2fcd4579b638cfe58eb083e3d449b414069d34e2e083e724" exitCode=0 Jan 05 22:14:21 crc kubenswrapper[4910]: I0105 22:14:21.227628 4910 generic.go:334] "Generic (PLEG): container finished" podID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerID="ef1d83a2d2057417c628c88f388bdfbf088c3937e36d5b1f7cbfa026594aa33d" exitCode=2 Jan 05 22:14:21 crc kubenswrapper[4910]: I0105 22:14:21.227639 4910 generic.go:334] "Generic (PLEG): container finished" podID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerID="d093b482c421adc7d82da4245d88adc7b270620fd4852fc0d78a8373c23ea00e" exitCode=0 Jan 05 22:14:21 crc kubenswrapper[4910]: I0105 22:14:21.227654 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f2250f2-3745-4cfd-8431-9d653c587b63","Type":"ContainerDied","Data":"852ee4e47f911c7c2fcd4579b638cfe58eb083e3d449b414069d34e2e083e724"} Jan 05 22:14:21 crc kubenswrapper[4910]: I0105 22:14:21.227712 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f2250f2-3745-4cfd-8431-9d653c587b63","Type":"ContainerDied","Data":"ef1d83a2d2057417c628c88f388bdfbf088c3937e36d5b1f7cbfa026594aa33d"} Jan 05 22:14:21 crc kubenswrapper[4910]: I0105 22:14:21.227723 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"3f2250f2-3745-4cfd-8431-9d653c587b63","Type":"ContainerDied","Data":"d093b482c421adc7d82da4245d88adc7b270620fd4852fc0d78a8373c23ea00e"} Jan 05 22:14:21 crc kubenswrapper[4910]: E0105 22:14:21.242647 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b6624df_a935_4a61_8bff_0033ae391bbe.slice/crio-conmon-7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b6624df_a935_4a61_8bff_0033ae391bbe.slice/crio-7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d.scope\": RecentStats: unable to find data in memory cache]" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.237677 4910 generic.go:334] "Generic (PLEG): container finished" podID="5b6624df-a935-4a61-8bff-0033ae391bbe" containerID="7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d" exitCode=143 Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.237841 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5b6624df-a935-4a61-8bff-0033ae391bbe","Type":"ContainerDied","Data":"7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d"} Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.642856 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.723080 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9m96\" (UniqueName: \"kubernetes.io/projected/3f2250f2-3745-4cfd-8431-9d653c587b63-kube-api-access-r9m96\") pod \"3f2250f2-3745-4cfd-8431-9d653c587b63\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.723603 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f2250f2-3745-4cfd-8431-9d653c587b63-log-httpd\") pod \"3f2250f2-3745-4cfd-8431-9d653c587b63\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.723770 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f2250f2-3745-4cfd-8431-9d653c587b63-run-httpd\") pod \"3f2250f2-3745-4cfd-8431-9d653c587b63\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.723824 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-config-data\") pod \"3f2250f2-3745-4cfd-8431-9d653c587b63\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.723905 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-combined-ca-bundle\") pod \"3f2250f2-3745-4cfd-8431-9d653c587b63\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.723932 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-ceilometer-tls-certs\") pod \"3f2250f2-3745-4cfd-8431-9d653c587b63\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.724035 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-sg-core-conf-yaml\") pod \"3f2250f2-3745-4cfd-8431-9d653c587b63\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.724068 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-scripts\") pod \"3f2250f2-3745-4cfd-8431-9d653c587b63\" (UID: \"3f2250f2-3745-4cfd-8431-9d653c587b63\") " Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.724262 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f2250f2-3745-4cfd-8431-9d653c587b63-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3f2250f2-3745-4cfd-8431-9d653c587b63" (UID: "3f2250f2-3745-4cfd-8431-9d653c587b63"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.724590 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f2250f2-3745-4cfd-8431-9d653c587b63-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.724774 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f2250f2-3745-4cfd-8431-9d653c587b63-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3f2250f2-3745-4cfd-8431-9d653c587b63" (UID: "3f2250f2-3745-4cfd-8431-9d653c587b63"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.729855 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f2250f2-3745-4cfd-8431-9d653c587b63-kube-api-access-r9m96" (OuterVolumeSpecName: "kube-api-access-r9m96") pod "3f2250f2-3745-4cfd-8431-9d653c587b63" (UID: "3f2250f2-3745-4cfd-8431-9d653c587b63"). InnerVolumeSpecName "kube-api-access-r9m96". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.739440 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-scripts" (OuterVolumeSpecName: "scripts") pod "3f2250f2-3745-4cfd-8431-9d653c587b63" (UID: "3f2250f2-3745-4cfd-8431-9d653c587b63"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.757025 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3f2250f2-3745-4cfd-8431-9d653c587b63" (UID: "3f2250f2-3745-4cfd-8431-9d653c587b63"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.795270 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "3f2250f2-3745-4cfd-8431-9d653c587b63" (UID: "3f2250f2-3745-4cfd-8431-9d653c587b63"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.827474 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.827511 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.827523 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9m96\" (UniqueName: \"kubernetes.io/projected/3f2250f2-3745-4cfd-8431-9d653c587b63-kube-api-access-r9m96\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.827536 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f2250f2-3745-4cfd-8431-9d653c587b63-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.827546 4910 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.831173 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f2250f2-3745-4cfd-8431-9d653c587b63" (UID: "3f2250f2-3745-4cfd-8431-9d653c587b63"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.846405 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-config-data" (OuterVolumeSpecName: "config-data") pod "3f2250f2-3745-4cfd-8431-9d653c587b63" (UID: "3f2250f2-3745-4cfd-8431-9d653c587b63"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.929328 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:22 crc kubenswrapper[4910]: I0105 22:14:22.929363 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f2250f2-3745-4cfd-8431-9d653c587b63-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.250186 4910 generic.go:334] "Generic (PLEG): container finished" podID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerID="999c2f7ca6118b09dbf9b8c9a92383cb7a09467572a0e86765bb0a727fb5a5d6" exitCode=0 Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.250224 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.250254 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f2250f2-3745-4cfd-8431-9d653c587b63","Type":"ContainerDied","Data":"999c2f7ca6118b09dbf9b8c9a92383cb7a09467572a0e86765bb0a727fb5a5d6"} Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.250313 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f2250f2-3745-4cfd-8431-9d653c587b63","Type":"ContainerDied","Data":"891455f579bc051763e37c94c472409763e8a997eed82f29ebc22f2aa0974e36"} Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.250334 4910 scope.go:117] "RemoveContainer" containerID="852ee4e47f911c7c2fcd4579b638cfe58eb083e3d449b414069d34e2e083e724" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.315046 4910 scope.go:117] "RemoveContainer" containerID="ef1d83a2d2057417c628c88f388bdfbf088c3937e36d5b1f7cbfa026594aa33d" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.329287 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.341301 4910 scope.go:117] "RemoveContainer" containerID="999c2f7ca6118b09dbf9b8c9a92383cb7a09467572a0e86765bb0a727fb5a5d6" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.342609 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.356998 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:14:23 crc kubenswrapper[4910]: E0105 22:14:23.357437 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="ceilometer-notification-agent" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.357459 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="ceilometer-notification-agent" Jan 05 22:14:23 crc kubenswrapper[4910]: E0105 22:14:23.357468 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="proxy-httpd" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.357476 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="proxy-httpd" Jan 05 22:14:23 crc kubenswrapper[4910]: E0105 22:14:23.357489 4910 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="sg-core" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.357496 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="sg-core" Jan 05 22:14:23 crc kubenswrapper[4910]: E0105 22:14:23.357512 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="ceilometer-central-agent" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.357518 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="ceilometer-central-agent" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.357738 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="ceilometer-central-agent" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.357760 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="sg-core" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.357774 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="ceilometer-notification-agent" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.357791 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" containerName="proxy-httpd" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.371642 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.375547 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.375548 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.375737 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.375995 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.393222 4910 scope.go:117] "RemoveContainer" containerID="d093b482c421adc7d82da4245d88adc7b270620fd4852fc0d78a8373c23ea00e" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.422418 4910 scope.go:117] "RemoveContainer" containerID="852ee4e47f911c7c2fcd4579b638cfe58eb083e3d449b414069d34e2e083e724" Jan 05 22:14:23 crc kubenswrapper[4910]: E0105 22:14:23.422912 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"852ee4e47f911c7c2fcd4579b638cfe58eb083e3d449b414069d34e2e083e724\": container with ID starting with 852ee4e47f911c7c2fcd4579b638cfe58eb083e3d449b414069d34e2e083e724 not found: ID does not exist" containerID="852ee4e47f911c7c2fcd4579b638cfe58eb083e3d449b414069d34e2e083e724" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.422949 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"852ee4e47f911c7c2fcd4579b638cfe58eb083e3d449b414069d34e2e083e724"} err="failed to get container status \"852ee4e47f911c7c2fcd4579b638cfe58eb083e3d449b414069d34e2e083e724\": rpc error: code = NotFound desc = could not find container 
\"852ee4e47f911c7c2fcd4579b638cfe58eb083e3d449b414069d34e2e083e724\": container with ID starting with 852ee4e47f911c7c2fcd4579b638cfe58eb083e3d449b414069d34e2e083e724 not found: ID does not exist" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.422977 4910 scope.go:117] "RemoveContainer" containerID="ef1d83a2d2057417c628c88f388bdfbf088c3937e36d5b1f7cbfa026594aa33d" Jan 05 22:14:23 crc kubenswrapper[4910]: E0105 22:14:23.423202 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef1d83a2d2057417c628c88f388bdfbf088c3937e36d5b1f7cbfa026594aa33d\": container with ID starting with ef1d83a2d2057417c628c88f388bdfbf088c3937e36d5b1f7cbfa026594aa33d not found: ID does not exist" containerID="ef1d83a2d2057417c628c88f388bdfbf088c3937e36d5b1f7cbfa026594aa33d" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.423226 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef1d83a2d2057417c628c88f388bdfbf088c3937e36d5b1f7cbfa026594aa33d"} err="failed to get container status \"ef1d83a2d2057417c628c88f388bdfbf088c3937e36d5b1f7cbfa026594aa33d\": rpc error: code = NotFound desc = could not find container \"ef1d83a2d2057417c628c88f388bdfbf088c3937e36d5b1f7cbfa026594aa33d\": container with ID starting with ef1d83a2d2057417c628c88f388bdfbf088c3937e36d5b1f7cbfa026594aa33d not found: ID does not exist" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.423240 4910 scope.go:117] "RemoveContainer" containerID="999c2f7ca6118b09dbf9b8c9a92383cb7a09467572a0e86765bb0a727fb5a5d6" Jan 05 22:14:23 crc kubenswrapper[4910]: E0105 22:14:23.423515 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"999c2f7ca6118b09dbf9b8c9a92383cb7a09467572a0e86765bb0a727fb5a5d6\": container with ID starting with 999c2f7ca6118b09dbf9b8c9a92383cb7a09467572a0e86765bb0a727fb5a5d6 not found: ID does not exist" containerID="999c2f7ca6118b09dbf9b8c9a92383cb7a09467572a0e86765bb0a727fb5a5d6" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.423550 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"999c2f7ca6118b09dbf9b8c9a92383cb7a09467572a0e86765bb0a727fb5a5d6"} err="failed to get container status \"999c2f7ca6118b09dbf9b8c9a92383cb7a09467572a0e86765bb0a727fb5a5d6\": rpc error: code = NotFound desc = could not find container \"999c2f7ca6118b09dbf9b8c9a92383cb7a09467572a0e86765bb0a727fb5a5d6\": container with ID starting with 999c2f7ca6118b09dbf9b8c9a92383cb7a09467572a0e86765bb0a727fb5a5d6 not found: ID does not exist" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.423567 4910 scope.go:117] "RemoveContainer" containerID="d093b482c421adc7d82da4245d88adc7b270620fd4852fc0d78a8373c23ea00e" Jan 05 22:14:23 crc kubenswrapper[4910]: E0105 22:14:23.423955 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d093b482c421adc7d82da4245d88adc7b270620fd4852fc0d78a8373c23ea00e\": container with ID starting with d093b482c421adc7d82da4245d88adc7b270620fd4852fc0d78a8373c23ea00e not found: ID does not exist" containerID="d093b482c421adc7d82da4245d88adc7b270620fd4852fc0d78a8373c23ea00e" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.424012 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d093b482c421adc7d82da4245d88adc7b270620fd4852fc0d78a8373c23ea00e"} 
err="failed to get container status \"d093b482c421adc7d82da4245d88adc7b270620fd4852fc0d78a8373c23ea00e\": rpc error: code = NotFound desc = could not find container \"d093b482c421adc7d82da4245d88adc7b270620fd4852fc0d78a8373c23ea00e\": container with ID starting with d093b482c421adc7d82da4245d88adc7b270620fd4852fc0d78a8373c23ea00e not found: ID does not exist" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.539535 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.539589 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d881977-4280-42f6-8ec5-65be97c8dc28-run-httpd\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.539629 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.539675 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.539742 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d881977-4280-42f6-8ec5-65be97c8dc28-log-httpd\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.539778 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-config-data\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.539801 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-scripts\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.539835 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxxs2\" (UniqueName: \"kubernetes.io/projected/3d881977-4280-42f6-8ec5-65be97c8dc28-kube-api-access-mxxs2\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.642112 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-scripts\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.642207 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxxs2\" (UniqueName: \"kubernetes.io/projected/3d881977-4280-42f6-8ec5-65be97c8dc28-kube-api-access-mxxs2\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.642254 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.642274 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d881977-4280-42f6-8ec5-65be97c8dc28-run-httpd\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.642297 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.642335 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.642397 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d881977-4280-42f6-8ec5-65be97c8dc28-log-httpd\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.642433 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-config-data\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.643246 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d881977-4280-42f6-8ec5-65be97c8dc28-run-httpd\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.643970 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d881977-4280-42f6-8ec5-65be97c8dc28-log-httpd\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.648528 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.648934 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.648986 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-config-data\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.661942 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.662616 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxxs2\" (UniqueName: \"kubernetes.io/projected/3d881977-4280-42f6-8ec5-65be97c8dc28-kube-api-access-mxxs2\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.664622 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-scripts\") pod \"ceilometer-0\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " pod="openstack/ceilometer-0" Jan 05 22:14:23 crc kubenswrapper[4910]: I0105 22:14:23.695053 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.309753 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.732591 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f2250f2-3745-4cfd-8431-9d653c587b63" path="/var/lib/kubelet/pods/3f2250f2-3745-4cfd-8431-9d653c587b63/volumes" Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.762988 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.877933 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b6624df-a935-4a61-8bff-0033ae391bbe-combined-ca-bundle\") pod \"5b6624df-a935-4a61-8bff-0033ae391bbe\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.878018 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b6624df-a935-4a61-8bff-0033ae391bbe-logs\") pod \"5b6624df-a935-4a61-8bff-0033ae391bbe\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.878368 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b6624df-a935-4a61-8bff-0033ae391bbe-config-data\") pod \"5b6624df-a935-4a61-8bff-0033ae391bbe\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.878491 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jj7wb\" (UniqueName: \"kubernetes.io/projected/5b6624df-a935-4a61-8bff-0033ae391bbe-kube-api-access-jj7wb\") pod \"5b6624df-a935-4a61-8bff-0033ae391bbe\" (UID: \"5b6624df-a935-4a61-8bff-0033ae391bbe\") " Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.878845 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b6624df-a935-4a61-8bff-0033ae391bbe-logs" (OuterVolumeSpecName: "logs") pod "5b6624df-a935-4a61-8bff-0033ae391bbe" (UID: "5b6624df-a935-4a61-8bff-0033ae391bbe"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.880074 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b6624df-a935-4a61-8bff-0033ae391bbe-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.884942 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b6624df-a935-4a61-8bff-0033ae391bbe-kube-api-access-jj7wb" (OuterVolumeSpecName: "kube-api-access-jj7wb") pod "5b6624df-a935-4a61-8bff-0033ae391bbe" (UID: "5b6624df-a935-4a61-8bff-0033ae391bbe"). InnerVolumeSpecName "kube-api-access-jj7wb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.923480 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b6624df-a935-4a61-8bff-0033ae391bbe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b6624df-a935-4a61-8bff-0033ae391bbe" (UID: "5b6624df-a935-4a61-8bff-0033ae391bbe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.926773 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b6624df-a935-4a61-8bff-0033ae391bbe-config-data" (OuterVolumeSpecName: "config-data") pod "5b6624df-a935-4a61-8bff-0033ae391bbe" (UID: "5b6624df-a935-4a61-8bff-0033ae391bbe"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.981815 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jj7wb\" (UniqueName: \"kubernetes.io/projected/5b6624df-a935-4a61-8bff-0033ae391bbe-kube-api-access-jj7wb\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.981844 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b6624df-a935-4a61-8bff-0033ae391bbe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:24 crc kubenswrapper[4910]: I0105 22:14:24.981855 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b6624df-a935-4a61-8bff-0033ae391bbe-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.270611 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d881977-4280-42f6-8ec5-65be97c8dc28","Type":"ContainerStarted","Data":"e2d862c3152a2babe7e6e933e033e365153addf3d9f0e0a5bfdf820d3c653e68"} Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.270693 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d881977-4280-42f6-8ec5-65be97c8dc28","Type":"ContainerStarted","Data":"8cc82c8ba918ec8c625a3be0674e6dbbdff1bcd246160f5bcea2d78987f50edb"} Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.273539 4910 generic.go:334] "Generic (PLEG): container finished" podID="5b6624df-a935-4a61-8bff-0033ae391bbe" containerID="12414d2a86b90ebb57ff3d6681b6e068c983f1c382ed067e604262d5fc4535e8" exitCode=0 Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.273571 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5b6624df-a935-4a61-8bff-0033ae391bbe","Type":"ContainerDied","Data":"12414d2a86b90ebb57ff3d6681b6e068c983f1c382ed067e604262d5fc4535e8"} Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.273589 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5b6624df-a935-4a61-8bff-0033ae391bbe","Type":"ContainerDied","Data":"2b28673a467f6a655658de03e18c731e41f3424a3b2ca4ef2d62673073a73116"} Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.273608 4910 scope.go:117] "RemoveContainer" containerID="12414d2a86b90ebb57ff3d6681b6e068c983f1c382ed067e604262d5fc4535e8" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.273742 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.298919 4910 scope.go:117] "RemoveContainer" containerID="7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.311038 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.334042 4910 scope.go:117] "RemoveContainer" containerID="12414d2a86b90ebb57ff3d6681b6e068c983f1c382ed067e604262d5fc4535e8" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.334161 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:14:25 crc kubenswrapper[4910]: E0105 22:14:25.336293 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12414d2a86b90ebb57ff3d6681b6e068c983f1c382ed067e604262d5fc4535e8\": container with ID starting with 12414d2a86b90ebb57ff3d6681b6e068c983f1c382ed067e604262d5fc4535e8 not found: ID does not exist" containerID="12414d2a86b90ebb57ff3d6681b6e068c983f1c382ed067e604262d5fc4535e8" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.336353 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12414d2a86b90ebb57ff3d6681b6e068c983f1c382ed067e604262d5fc4535e8"} err="failed to get container status \"12414d2a86b90ebb57ff3d6681b6e068c983f1c382ed067e604262d5fc4535e8\": rpc error: code = NotFound desc = could not find container \"12414d2a86b90ebb57ff3d6681b6e068c983f1c382ed067e604262d5fc4535e8\": container with ID starting with 12414d2a86b90ebb57ff3d6681b6e068c983f1c382ed067e604262d5fc4535e8 not found: ID does not exist" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.336430 4910 scope.go:117] "RemoveContainer" containerID="7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d" Jan 05 22:14:25 crc kubenswrapper[4910]: E0105 22:14:25.336854 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d\": container with ID starting with 7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d not found: ID does not exist" containerID="7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.336895 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d"} err="failed to get container status \"7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d\": rpc error: code = NotFound desc = could not find container \"7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d\": container with ID starting with 7287cf08f6fbec7939f1dc18d1372415150373ba9c4bf96c3d928459d130f77d not found: ID does not exist" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.351658 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 05 22:14:25 crc kubenswrapper[4910]: E0105 22:14:25.352057 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b6624df-a935-4a61-8bff-0033ae391bbe" containerName="nova-api-api" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.352074 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b6624df-a935-4a61-8bff-0033ae391bbe" containerName="nova-api-api" Jan 05 22:14:25 crc 
kubenswrapper[4910]: E0105 22:14:25.352107 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b6624df-a935-4a61-8bff-0033ae391bbe" containerName="nova-api-log" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.352127 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b6624df-a935-4a61-8bff-0033ae391bbe" containerName="nova-api-log" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.352280 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b6624df-a935-4a61-8bff-0033ae391bbe" containerName="nova-api-log" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.352308 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b6624df-a935-4a61-8bff-0033ae391bbe" containerName="nova-api-api" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.353333 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.355876 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.356058 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.356223 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.379367 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.493106 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsvv2\" (UniqueName: \"kubernetes.io/projected/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-kube-api-access-dsvv2\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.493191 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-config-data\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.493266 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-public-tls-certs\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.493291 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-logs\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.493324 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.493355 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.522495 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.540450 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.596040 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsvv2\" (UniqueName: \"kubernetes.io/projected/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-kube-api-access-dsvv2\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.596157 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-config-data\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.596316 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-public-tls-certs\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.596338 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-logs\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.596390 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.596437 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.596835 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-logs\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.601012 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-public-tls-certs\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.601145 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.601705 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-config-data\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.602287 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.616318 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsvv2\" (UniqueName: \"kubernetes.io/projected/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-kube-api-access-dsvv2\") pod \"nova-api-0\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " pod="openstack/nova-api-0" Jan 05 22:14:25 crc kubenswrapper[4910]: I0105 22:14:25.687674 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 22:14:26 crc kubenswrapper[4910]: W0105 22:14:26.205873 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb51a1e52_8fcf_4cc5_9f3d_1544dc64f997.slice/crio-40b52dcc429ddab35497d594e56fdfcba6fef7f1067b4c6358ec97da65a6d135 WatchSource:0}: Error finding container 40b52dcc429ddab35497d594e56fdfcba6fef7f1067b4c6358ec97da65a6d135: Status 404 returned error can't find the container with id 40b52dcc429ddab35497d594e56fdfcba6fef7f1067b4c6358ec97da65a6d135 Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.206049 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.286087 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997","Type":"ContainerStarted","Data":"40b52dcc429ddab35497d594e56fdfcba6fef7f1067b4c6358ec97da65a6d135"} Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.288688 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d881977-4280-42f6-8ec5-65be97c8dc28","Type":"ContainerStarted","Data":"b20fb279b37814b85e4faff0ba6be368ee421956a3846dd7b5ccb446665cf296"} Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.332990 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.660189 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-5dg7h"] Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.662015 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.665877 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.666294 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.674478 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-5dg7h"] Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.735634 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b6624df-a935-4a61-8bff-0033ae391bbe" path="/var/lib/kubelet/pods/5b6624df-a935-4a61-8bff-0033ae391bbe/volumes" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.836075 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-config-data\") pod \"nova-cell1-cell-mapping-5dg7h\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.836167 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s98rk\" (UniqueName: \"kubernetes.io/projected/dda642c8-96ed-4c08-be87-119551bcd735-kube-api-access-s98rk\") pod \"nova-cell1-cell-mapping-5dg7h\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.836277 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-scripts\") pod \"nova-cell1-cell-mapping-5dg7h\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.836299 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-5dg7h\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.938092 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s98rk\" (UniqueName: \"kubernetes.io/projected/dda642c8-96ed-4c08-be87-119551bcd735-kube-api-access-s98rk\") pod \"nova-cell1-cell-mapping-5dg7h\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.938212 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-scripts\") pod \"nova-cell1-cell-mapping-5dg7h\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.938235 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-5dg7h\" (UID: 
\"dda642c8-96ed-4c08-be87-119551bcd735\") " pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.938321 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-config-data\") pod \"nova-cell1-cell-mapping-5dg7h\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.947785 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-config-data\") pod \"nova-cell1-cell-mapping-5dg7h\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.948325 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-5dg7h\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.961700 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-scripts\") pod \"nova-cell1-cell-mapping-5dg7h\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.962757 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s98rk\" (UniqueName: \"kubernetes.io/projected/dda642c8-96ed-4c08-be87-119551bcd735-kube-api-access-s98rk\") pod \"nova-cell1-cell-mapping-5dg7h\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:26 crc kubenswrapper[4910]: I0105 22:14:26.984695 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:27 crc kubenswrapper[4910]: I0105 22:14:27.317859 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997","Type":"ContainerStarted","Data":"225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1"} Jan 05 22:14:27 crc kubenswrapper[4910]: I0105 22:14:27.318180 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997","Type":"ContainerStarted","Data":"9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c"} Jan 05 22:14:27 crc kubenswrapper[4910]: I0105 22:14:27.323462 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d881977-4280-42f6-8ec5-65be97c8dc28","Type":"ContainerStarted","Data":"f4147284652162acb83f98dd4b38c821d03f77ed60c1b2b0c22836ec39ba3492"} Jan 05 22:14:27 crc kubenswrapper[4910]: I0105 22:14:27.342030 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.342002901 podStartE2EDuration="2.342002901s" podCreationTimestamp="2026-01-05 22:14:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:14:27.338766211 +0000 UTC m=+1398.916263881" watchObservedRunningTime="2026-01-05 22:14:27.342002901 +0000 UTC m=+1398.919500571" Jan 05 22:14:27 crc kubenswrapper[4910]: I0105 22:14:27.570523 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-5dg7h"] Jan 05 22:14:27 crc kubenswrapper[4910]: W0105 22:14:27.574782 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddda642c8_96ed_4c08_be87_119551bcd735.slice/crio-a4b97f12c0b72322cb4a13eafa7ad96d7b50ae95878d6b71e3c1a97f51fb20f9 WatchSource:0}: Error finding container a4b97f12c0b72322cb4a13eafa7ad96d7b50ae95878d6b71e3c1a97f51fb20f9: Status 404 returned error can't find the container with id a4b97f12c0b72322cb4a13eafa7ad96d7b50ae95878d6b71e3c1a97f51fb20f9 Jan 05 22:14:27 crc kubenswrapper[4910]: I0105 22:14:27.709990 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-867cd545c7-pd68r" Jan 05 22:14:27 crc kubenswrapper[4910]: I0105 22:14:27.797256 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-zbvpw"] Jan 05 22:14:27 crc kubenswrapper[4910]: I0105 22:14:27.797614 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" podUID="0a447ec9-7c46-472b-af0a-1c0633e4abf2" containerName="dnsmasq-dns" containerID="cri-o://b179dd5d1002d586fe0572c6bcbeb49af72f867d6a5101a1fc80bd913f604cff" gracePeriod=10 Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.353849 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5dg7h" event={"ID":"dda642c8-96ed-4c08-be87-119551bcd735","Type":"ContainerStarted","Data":"a15750a24f3b614dbf189a0ea0b3ef39ab10388dffcb53a3c89703b0c4d6c6f7"} Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.354267 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5dg7h" 
event={"ID":"dda642c8-96ed-4c08-be87-119551bcd735","Type":"ContainerStarted","Data":"a4b97f12c0b72322cb4a13eafa7ad96d7b50ae95878d6b71e3c1a97f51fb20f9"} Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.361420 4910 generic.go:334] "Generic (PLEG): container finished" podID="0a447ec9-7c46-472b-af0a-1c0633e4abf2" containerID="b179dd5d1002d586fe0572c6bcbeb49af72f867d6a5101a1fc80bd913f604cff" exitCode=0 Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.362398 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" event={"ID":"0a447ec9-7c46-472b-af0a-1c0633e4abf2","Type":"ContainerDied","Data":"b179dd5d1002d586fe0572c6bcbeb49af72f867d6a5101a1fc80bd913f604cff"} Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.379291 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-5dg7h" podStartSLOduration=2.379266875 podStartE2EDuration="2.379266875s" podCreationTimestamp="2026-01-05 22:14:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:14:28.372677331 +0000 UTC m=+1399.950175001" watchObservedRunningTime="2026-01-05 22:14:28.379266875 +0000 UTC m=+1399.956764545" Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.627765 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.777172 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgsxd\" (UniqueName: \"kubernetes.io/projected/0a447ec9-7c46-472b-af0a-1c0633e4abf2-kube-api-access-cgsxd\") pod \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.777264 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-dns-swift-storage-0\") pod \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.777325 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-ovsdbserver-sb\") pod \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.777411 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-ovsdbserver-nb\") pod \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.777480 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-config\") pod \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\" (UID: \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.777554 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-dns-svc\") pod \"0a447ec9-7c46-472b-af0a-1c0633e4abf2\" (UID: 
\"0a447ec9-7c46-472b-af0a-1c0633e4abf2\") " Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.785889 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a447ec9-7c46-472b-af0a-1c0633e4abf2-kube-api-access-cgsxd" (OuterVolumeSpecName: "kube-api-access-cgsxd") pod "0a447ec9-7c46-472b-af0a-1c0633e4abf2" (UID: "0a447ec9-7c46-472b-af0a-1c0633e4abf2"). InnerVolumeSpecName "kube-api-access-cgsxd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.850928 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0a447ec9-7c46-472b-af0a-1c0633e4abf2" (UID: "0a447ec9-7c46-472b-af0a-1c0633e4abf2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.860395 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0a447ec9-7c46-472b-af0a-1c0633e4abf2" (UID: "0a447ec9-7c46-472b-af0a-1c0633e4abf2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.860717 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-config" (OuterVolumeSpecName: "config") pod "0a447ec9-7c46-472b-af0a-1c0633e4abf2" (UID: "0a447ec9-7c46-472b-af0a-1c0633e4abf2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.865571 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0a447ec9-7c46-472b-af0a-1c0633e4abf2" (UID: "0a447ec9-7c46-472b-af0a-1c0633e4abf2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.872008 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0a447ec9-7c46-472b-af0a-1c0633e4abf2" (UID: "0a447ec9-7c46-472b-af0a-1c0633e4abf2"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.880378 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.880417 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.880435 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.880449 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.880462 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a447ec9-7c46-472b-af0a-1c0633e4abf2-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:28 crc kubenswrapper[4910]: I0105 22:14:28.880474 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgsxd\" (UniqueName: \"kubernetes.io/projected/0a447ec9-7c46-472b-af0a-1c0633e4abf2-kube-api-access-cgsxd\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:29 crc kubenswrapper[4910]: I0105 22:14:29.373237 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" event={"ID":"0a447ec9-7c46-472b-af0a-1c0633e4abf2","Type":"ContainerDied","Data":"d6ebc9335df6a6464c6929dc8748349684b354d02157ee6e777576095af38727"} Jan 05 22:14:29 crc kubenswrapper[4910]: I0105 22:14:29.373323 4910 scope.go:117] "RemoveContainer" containerID="b179dd5d1002d586fe0572c6bcbeb49af72f867d6a5101a1fc80bd913f604cff" Jan 05 22:14:29 crc kubenswrapper[4910]: I0105 22:14:29.373259 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bfb54f9b5-zbvpw" Jan 05 22:14:29 crc kubenswrapper[4910]: I0105 22:14:29.394619 4910 scope.go:117] "RemoveContainer" containerID="ecf9d4442ec5888639a8263fc99dcb78cd90e05e8c3b8c08265be6959307e61d" Jan 05 22:14:29 crc kubenswrapper[4910]: I0105 22:14:29.410105 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-zbvpw"] Jan 05 22:14:29 crc kubenswrapper[4910]: I0105 22:14:29.419004 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-zbvpw"] Jan 05 22:14:30 crc kubenswrapper[4910]: I0105 22:14:30.389312 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d881977-4280-42f6-8ec5-65be97c8dc28","Type":"ContainerStarted","Data":"6b6e40e5636a9a405ca504456fe3ac469d0bf34f5f35ee789bc2b3c7fd43ed8f"} Jan 05 22:14:30 crc kubenswrapper[4910]: I0105 22:14:30.390370 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 05 22:14:30 crc kubenswrapper[4910]: I0105 22:14:30.411909 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.902456667 podStartE2EDuration="7.411883751s" podCreationTimestamp="2026-01-05 22:14:23 +0000 UTC" firstStartedPulling="2026-01-05 22:14:24.325535678 +0000 UTC m=+1395.903033348" lastFinishedPulling="2026-01-05 22:14:29.834962762 +0000 UTC m=+1401.412460432" observedRunningTime="2026-01-05 22:14:30.408701672 +0000 UTC m=+1401.986199372" watchObservedRunningTime="2026-01-05 22:14:30.411883751 +0000 UTC m=+1401.989381421" Jan 05 22:14:30 crc kubenswrapper[4910]: I0105 22:14:30.731504 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a447ec9-7c46-472b-af0a-1c0633e4abf2" path="/var/lib/kubelet/pods/0a447ec9-7c46-472b-af0a-1c0633e4abf2/volumes" Jan 05 22:14:33 crc kubenswrapper[4910]: I0105 22:14:33.421507 4910 generic.go:334] "Generic (PLEG): container finished" podID="dda642c8-96ed-4c08-be87-119551bcd735" containerID="a15750a24f3b614dbf189a0ea0b3ef39ab10388dffcb53a3c89703b0c4d6c6f7" exitCode=0 Jan 05 22:14:33 crc kubenswrapper[4910]: I0105 22:14:33.421886 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5dg7h" event={"ID":"dda642c8-96ed-4c08-be87-119551bcd735","Type":"ContainerDied","Data":"a15750a24f3b614dbf189a0ea0b3ef39ab10388dffcb53a3c89703b0c4d6c6f7"} Jan 05 22:14:34 crc kubenswrapper[4910]: I0105 22:14:34.820078 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:34 crc kubenswrapper[4910]: I0105 22:14:34.904730 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s98rk\" (UniqueName: \"kubernetes.io/projected/dda642c8-96ed-4c08-be87-119551bcd735-kube-api-access-s98rk\") pod \"dda642c8-96ed-4c08-be87-119551bcd735\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " Jan 05 22:14:34 crc kubenswrapper[4910]: I0105 22:14:34.904916 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-config-data\") pod \"dda642c8-96ed-4c08-be87-119551bcd735\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " Jan 05 22:14:34 crc kubenswrapper[4910]: I0105 22:14:34.904946 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-combined-ca-bundle\") pod \"dda642c8-96ed-4c08-be87-119551bcd735\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " Jan 05 22:14:34 crc kubenswrapper[4910]: I0105 22:14:34.905006 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-scripts\") pod \"dda642c8-96ed-4c08-be87-119551bcd735\" (UID: \"dda642c8-96ed-4c08-be87-119551bcd735\") " Jan 05 22:14:34 crc kubenswrapper[4910]: I0105 22:14:34.925819 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dda642c8-96ed-4c08-be87-119551bcd735-kube-api-access-s98rk" (OuterVolumeSpecName: "kube-api-access-s98rk") pod "dda642c8-96ed-4c08-be87-119551bcd735" (UID: "dda642c8-96ed-4c08-be87-119551bcd735"). InnerVolumeSpecName "kube-api-access-s98rk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:14:34 crc kubenswrapper[4910]: I0105 22:14:34.940363 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-scripts" (OuterVolumeSpecName: "scripts") pod "dda642c8-96ed-4c08-be87-119551bcd735" (UID: "dda642c8-96ed-4c08-be87-119551bcd735"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:34 crc kubenswrapper[4910]: I0105 22:14:34.956311 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-config-data" (OuterVolumeSpecName: "config-data") pod "dda642c8-96ed-4c08-be87-119551bcd735" (UID: "dda642c8-96ed-4c08-be87-119551bcd735"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:34 crc kubenswrapper[4910]: I0105 22:14:34.995572 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dda642c8-96ed-4c08-be87-119551bcd735" (UID: "dda642c8-96ed-4c08-be87-119551bcd735"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.008580 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.008633 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.008649 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dda642c8-96ed-4c08-be87-119551bcd735-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.008662 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s98rk\" (UniqueName: \"kubernetes.io/projected/dda642c8-96ed-4c08-be87-119551bcd735-kube-api-access-s98rk\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.439597 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5dg7h" event={"ID":"dda642c8-96ed-4c08-be87-119551bcd735","Type":"ContainerDied","Data":"a4b97f12c0b72322cb4a13eafa7ad96d7b50ae95878d6b71e3c1a97f51fb20f9"} Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.439649 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4b97f12c0b72322cb4a13eafa7ad96d7b50ae95878d6b71e3c1a97f51fb20f9" Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.440026 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5dg7h" Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.626158 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.626490 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" containerName="nova-api-log" containerID="cri-o://9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c" gracePeriod=30 Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.626573 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" containerName="nova-api-api" containerID="cri-o://225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1" gracePeriod=30 Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.640325 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.640675 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="9dedaba8-f1c7-4b13-a5c5-78b2ead1753c" containerName="nova-scheduler-scheduler" containerID="cri-o://10869218f49d1497aa8b4413fabc4bce2443981d8eca4525be5a59927d56345d" gracePeriod=30 Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.740367 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.742393 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" 
containerName="nova-metadata-log" containerID="cri-o://e752583795fe468de9af6eb52c315e5c49f57548b9b236bdb28a3bb75692ebb7" gracePeriod=30 Jan 05 22:14:35 crc kubenswrapper[4910]: I0105 22:14:35.743217 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" containerName="nova-metadata-metadata" containerID="cri-o://05e89eb9ca56e3ebe59045f592314e58faf89eea75b4fe0a9ff2a77177a668a3" gracePeriod=30 Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.167348 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.237330 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-combined-ca-bundle\") pod \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.237403 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dsvv2\" (UniqueName: \"kubernetes.io/projected/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-kube-api-access-dsvv2\") pod \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.237429 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-public-tls-certs\") pod \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.237540 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-config-data\") pod \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.237583 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-logs\") pod \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.237656 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-internal-tls-certs\") pod \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\" (UID: \"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997\") " Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.238022 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-logs" (OuterVolumeSpecName: "logs") pod "b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" (UID: "b51a1e52-8fcf-4cc5-9f3d-1544dc64f997"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.245361 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-kube-api-access-dsvv2" (OuterVolumeSpecName: "kube-api-access-dsvv2") pod "b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" (UID: "b51a1e52-8fcf-4cc5-9f3d-1544dc64f997"). InnerVolumeSpecName "kube-api-access-dsvv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.272338 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-config-data" (OuterVolumeSpecName: "config-data") pod "b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" (UID: "b51a1e52-8fcf-4cc5-9f3d-1544dc64f997"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.272920 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" (UID: "b51a1e52-8fcf-4cc5-9f3d-1544dc64f997"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.298502 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" (UID: "b51a1e52-8fcf-4cc5-9f3d-1544dc64f997"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.298871 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" (UID: "b51a1e52-8fcf-4cc5-9f3d-1544dc64f997"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.339847 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.339876 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dsvv2\" (UniqueName: \"kubernetes.io/projected/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-kube-api-access-dsvv2\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.339887 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.339899 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.339908 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.339916 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.449897 4910 generic.go:334] "Generic (PLEG): container finished" podID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" containerID="e752583795fe468de9af6eb52c315e5c49f57548b9b236bdb28a3bb75692ebb7" exitCode=143 Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.449975 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"640b0e1e-49a8-4daf-899f-c1a7ab82e976","Type":"ContainerDied","Data":"e752583795fe468de9af6eb52c315e5c49f57548b9b236bdb28a3bb75692ebb7"} Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.451496 4910 generic.go:334] "Generic (PLEG): container finished" podID="b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" containerID="225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1" exitCode=0 Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.451520 4910 generic.go:334] "Generic (PLEG): container finished" podID="b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" containerID="9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c" exitCode=143 Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.451539 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997","Type":"ContainerDied","Data":"225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1"} Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.451561 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997","Type":"ContainerDied","Data":"9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c"} Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.451574 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"b51a1e52-8fcf-4cc5-9f3d-1544dc64f997","Type":"ContainerDied","Data":"40b52dcc429ddab35497d594e56fdfcba6fef7f1067b4c6358ec97da65a6d135"} Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.451593 4910 scope.go:117] "RemoveContainer" containerID="225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.451729 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.495670 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.508766 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.508954 4910 scope.go:117] "RemoveContainer" containerID="9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.518410 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 05 22:14:36 crc kubenswrapper[4910]: E0105 22:14:36.519147 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dda642c8-96ed-4c08-be87-119551bcd735" containerName="nova-manage" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.519172 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dda642c8-96ed-4c08-be87-119551bcd735" containerName="nova-manage" Jan 05 22:14:36 crc kubenswrapper[4910]: E0105 22:14:36.519191 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" containerName="nova-api-log" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.519200 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" containerName="nova-api-log" Jan 05 22:14:36 crc kubenswrapper[4910]: E0105 22:14:36.519223 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a447ec9-7c46-472b-af0a-1c0633e4abf2" containerName="init" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.519231 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a447ec9-7c46-472b-af0a-1c0633e4abf2" containerName="init" Jan 05 22:14:36 crc kubenswrapper[4910]: E0105 22:14:36.519259 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" containerName="nova-api-api" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.519268 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" containerName="nova-api-api" Jan 05 22:14:36 crc kubenswrapper[4910]: E0105 22:14:36.519288 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a447ec9-7c46-472b-af0a-1c0633e4abf2" containerName="dnsmasq-dns" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.519296 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a447ec9-7c46-472b-af0a-1c0633e4abf2" containerName="dnsmasq-dns" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.519527 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a447ec9-7c46-472b-af0a-1c0633e4abf2" containerName="dnsmasq-dns" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.519548 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" containerName="nova-api-api" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.519561 4910 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="dda642c8-96ed-4c08-be87-119551bcd735" containerName="nova-manage" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.519580 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" containerName="nova-api-log" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.521019 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.523724 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.524019 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.524344 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.548486 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.549496 4910 scope.go:117] "RemoveContainer" containerID="225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1" Jan 05 22:14:36 crc kubenswrapper[4910]: E0105 22:14:36.550014 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1\": container with ID starting with 225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1 not found: ID does not exist" containerID="225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.550047 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1"} err="failed to get container status \"225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1\": rpc error: code = NotFound desc = could not find container \"225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1\": container with ID starting with 225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1 not found: ID does not exist" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.550074 4910 scope.go:117] "RemoveContainer" containerID="9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c" Jan 05 22:14:36 crc kubenswrapper[4910]: E0105 22:14:36.550558 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c\": container with ID starting with 9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c not found: ID does not exist" containerID="9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.550589 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c"} err="failed to get container status \"9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c\": rpc error: code = NotFound desc = could not find container \"9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c\": container with ID starting with 
9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c not found: ID does not exist" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.550604 4910 scope.go:117] "RemoveContainer" containerID="225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.550945 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1"} err="failed to get container status \"225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1\": rpc error: code = NotFound desc = could not find container \"225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1\": container with ID starting with 225fd4ef33468515e6f1bf9d22c98bd24fe8cd3c7cb7ae5d958d12e49f917cf1 not found: ID does not exist" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.550964 4910 scope.go:117] "RemoveContainer" containerID="9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.551356 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c"} err="failed to get container status \"9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c\": rpc error: code = NotFound desc = could not find container \"9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c\": container with ID starting with 9c4645ac71fd926e93ce1dbed7cebdc8397b1f3e129ac334c6a8f7649338013c not found: ID does not exist" Jan 05 22:14:36 crc kubenswrapper[4910]: E0105 22:14:36.631092 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="10869218f49d1497aa8b4413fabc4bce2443981d8eca4525be5a59927d56345d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 22:14:36 crc kubenswrapper[4910]: E0105 22:14:36.633918 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="10869218f49d1497aa8b4413fabc4bce2443981d8eca4525be5a59927d56345d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 22:14:36 crc kubenswrapper[4910]: E0105 22:14:36.638419 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="10869218f49d1497aa8b4413fabc4bce2443981d8eca4525be5a59927d56345d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 22:14:36 crc kubenswrapper[4910]: E0105 22:14:36.638471 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="9dedaba8-f1c7-4b13-a5c5-78b2ead1753c" containerName="nova-scheduler-scheduler" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.646208 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " 
pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.646295 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-public-tls-certs\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.646384 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nlzn\" (UniqueName: \"kubernetes.io/projected/cf7e2b20-58e5-4c61-9e50-c1af51acf521-kube-api-access-2nlzn\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.646422 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-config-data\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.646702 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.646803 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf7e2b20-58e5-4c61-9e50-c1af51acf521-logs\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.735113 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b51a1e52-8fcf-4cc5-9f3d-1544dc64f997" path="/var/lib/kubelet/pods/b51a1e52-8fcf-4cc5-9f3d-1544dc64f997/volumes" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.748792 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.748911 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-public-tls-certs\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.748958 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nlzn\" (UniqueName: \"kubernetes.io/projected/cf7e2b20-58e5-4c61-9e50-c1af51acf521-kube-api-access-2nlzn\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.749004 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-config-data\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 
05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.749078 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.749104 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf7e2b20-58e5-4c61-9e50-c1af51acf521-logs\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.749830 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf7e2b20-58e5-4c61-9e50-c1af51acf521-logs\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.754705 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-config-data\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.762530 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-public-tls-certs\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.762873 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.765770 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.771646 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nlzn\" (UniqueName: \"kubernetes.io/projected/cf7e2b20-58e5-4c61-9e50-c1af51acf521-kube-api-access-2nlzn\") pod \"nova-api-0\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " pod="openstack/nova-api-0" Jan 05 22:14:36 crc kubenswrapper[4910]: I0105 22:14:36.856333 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 05 22:14:37 crc kubenswrapper[4910]: I0105 22:14:37.284089 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:14:37 crc kubenswrapper[4910]: W0105 22:14:37.287783 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf7e2b20_58e5_4c61_9e50_c1af51acf521.slice/crio-f6c7e7a51d1e1d303d4bb88e273d5cb33cbf601c7df231262ca982405886341a WatchSource:0}: Error finding container f6c7e7a51d1e1d303d4bb88e273d5cb33cbf601c7df231262ca982405886341a: Status 404 returned error can't find the container with id f6c7e7a51d1e1d303d4bb88e273d5cb33cbf601c7df231262ca982405886341a Jan 05 22:14:37 crc kubenswrapper[4910]: I0105 22:14:37.464608 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cf7e2b20-58e5-4c61-9e50-c1af51acf521","Type":"ContainerStarted","Data":"b5b23f1d39fd87015c972670711fa8663521d44165624ef12201ef9f0c36a505"} Jan 05 22:14:37 crc kubenswrapper[4910]: I0105 22:14:37.464999 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cf7e2b20-58e5-4c61-9e50-c1af51acf521","Type":"ContainerStarted","Data":"f6c7e7a51d1e1d303d4bb88e273d5cb33cbf601c7df231262ca982405886341a"} Jan 05 22:14:38 crc kubenswrapper[4910]: I0105 22:14:38.478147 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cf7e2b20-58e5-4c61-9e50-c1af51acf521","Type":"ContainerStarted","Data":"d7aecd1f8fe9c5ffa799f574329b9dee47f4fd0d6129def71457d2e4db819834"} Jan 05 22:14:38 crc kubenswrapper[4910]: I0105 22:14:38.519547 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.51951335 podStartE2EDuration="2.51951335s" podCreationTimestamp="2026-01-05 22:14:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:14:38.499062842 +0000 UTC m=+1410.076560552" watchObservedRunningTime="2026-01-05 22:14:38.51951335 +0000 UTC m=+1410.097011050" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.068876 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": read tcp 10.217.0.2:60488->10.217.0.191:8775: read: connection reset by peer" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.069035 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": read tcp 10.217.0.2:60486->10.217.0.191:8775: read: connection reset by peer" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.499260 4910 generic.go:334] "Generic (PLEG): container finished" podID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" containerID="05e89eb9ca56e3ebe59045f592314e58faf89eea75b4fe0a9ff2a77177a668a3" exitCode=0 Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.499419 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"640b0e1e-49a8-4daf-899f-c1a7ab82e976","Type":"ContainerDied","Data":"05e89eb9ca56e3ebe59045f592314e58faf89eea75b4fe0a9ff2a77177a668a3"} Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.499987 
4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"640b0e1e-49a8-4daf-899f-c1a7ab82e976","Type":"ContainerDied","Data":"d82880f81a4b6c5266f95426d7d47847b24bcd4ad74b4f499b037e34e179b45a"} Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.500002 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d82880f81a4b6c5266f95426d7d47847b24bcd4ad74b4f499b037e34e179b45a" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.572633 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.714579 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-config-data\") pod \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.714811 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-combined-ca-bundle\") pod \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.714862 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/640b0e1e-49a8-4daf-899f-c1a7ab82e976-logs\") pod \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.714893 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-nova-metadata-tls-certs\") pod \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.714980 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pk8h8\" (UniqueName: \"kubernetes.io/projected/640b0e1e-49a8-4daf-899f-c1a7ab82e976-kube-api-access-pk8h8\") pod \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\" (UID: \"640b0e1e-49a8-4daf-899f-c1a7ab82e976\") " Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.715919 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/640b0e1e-49a8-4daf-899f-c1a7ab82e976-logs" (OuterVolumeSpecName: "logs") pod "640b0e1e-49a8-4daf-899f-c1a7ab82e976" (UID: "640b0e1e-49a8-4daf-899f-c1a7ab82e976"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.722589 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/640b0e1e-49a8-4daf-899f-c1a7ab82e976-kube-api-access-pk8h8" (OuterVolumeSpecName: "kube-api-access-pk8h8") pod "640b0e1e-49a8-4daf-899f-c1a7ab82e976" (UID: "640b0e1e-49a8-4daf-899f-c1a7ab82e976"). InnerVolumeSpecName "kube-api-access-pk8h8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.745199 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "640b0e1e-49a8-4daf-899f-c1a7ab82e976" (UID: "640b0e1e-49a8-4daf-899f-c1a7ab82e976"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.751484 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-config-data" (OuterVolumeSpecName: "config-data") pod "640b0e1e-49a8-4daf-899f-c1a7ab82e976" (UID: "640b0e1e-49a8-4daf-899f-c1a7ab82e976"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.788343 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "640b0e1e-49a8-4daf-899f-c1a7ab82e976" (UID: "640b0e1e-49a8-4daf-899f-c1a7ab82e976"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.817405 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.817450 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/640b0e1e-49a8-4daf-899f-c1a7ab82e976-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.817461 4910 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.817475 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pk8h8\" (UniqueName: \"kubernetes.io/projected/640b0e1e-49a8-4daf-899f-c1a7ab82e976-kube-api-access-pk8h8\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:39 crc kubenswrapper[4910]: I0105 22:14:39.817484 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/640b0e1e-49a8-4daf-899f-c1a7ab82e976-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.512534 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.568409 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.576182 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.607205 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:14:40 crc kubenswrapper[4910]: E0105 22:14:40.607760 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" containerName="nova-metadata-metadata" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.607788 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" containerName="nova-metadata-metadata" Jan 05 22:14:40 crc kubenswrapper[4910]: E0105 22:14:40.607809 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" containerName="nova-metadata-log" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.607816 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" containerName="nova-metadata-log" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.608012 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" containerName="nova-metadata-metadata" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.608047 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" containerName="nova-metadata-log" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.609051 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.618305 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.618505 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.623340 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.733314 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="640b0e1e-49a8-4daf-899f-c1a7ab82e976" path="/var/lib/kubelet/pods/640b0e1e-49a8-4daf-899f-c1a7ab82e976/volumes" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.734445 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.734501 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9srqw\" (UniqueName: \"kubernetes.io/projected/3486557d-93f8-44c2-b40a-dd8aca19d8e1-kube-api-access-9srqw\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.734543 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.734662 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-config-data\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.734708 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3486557d-93f8-44c2-b40a-dd8aca19d8e1-logs\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.836618 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-config-data\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.836721 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3486557d-93f8-44c2-b40a-dd8aca19d8e1-logs\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.836839 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.836887 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9srqw\" (UniqueName: \"kubernetes.io/projected/3486557d-93f8-44c2-b40a-dd8aca19d8e1-kube-api-access-9srqw\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.836925 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.837278 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3486557d-93f8-44c2-b40a-dd8aca19d8e1-logs\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.842850 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.843734 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.853193 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-config-data\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.871198 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9srqw\" (UniqueName: \"kubernetes.io/projected/3486557d-93f8-44c2-b40a-dd8aca19d8e1-kube-api-access-9srqw\") pod \"nova-metadata-0\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.935149 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.951995 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:14:40 crc kubenswrapper[4910]: I0105 22:14:40.952057 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.406230 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:14:41 crc kubenswrapper[4910]: W0105 22:14:41.410869 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3486557d_93f8_44c2_b40a_dd8aca19d8e1.slice/crio-b66c4ba08833e050e588a54e883f9d8d4263532a4489d36589833f105def0349 WatchSource:0}: Error finding container b66c4ba08833e050e588a54e883f9d8d4263532a4489d36589833f105def0349: Status 404 returned error can't find the container with id b66c4ba08833e050e588a54e883f9d8d4263532a4489d36589833f105def0349 Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.526547 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3486557d-93f8-44c2-b40a-dd8aca19d8e1","Type":"ContainerStarted","Data":"b66c4ba08833e050e588a54e883f9d8d4263532a4489d36589833f105def0349"} Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.529519 4910 generic.go:334] "Generic (PLEG): container finished" podID="9dedaba8-f1c7-4b13-a5c5-78b2ead1753c" containerID="10869218f49d1497aa8b4413fabc4bce2443981d8eca4525be5a59927d56345d" exitCode=0 Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.529596 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c","Type":"ContainerDied","Data":"10869218f49d1497aa8b4413fabc4bce2443981d8eca4525be5a59927d56345d"} Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.529643 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c","Type":"ContainerDied","Data":"0c263c31e48774f2eb04c9c74b29d692587996bd7b3a0e1601d8ae8201c3b616"} Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.529663 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c263c31e48774f2eb04c9c74b29d692587996bd7b3a0e1601d8ae8201c3b616" Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.563320 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.652321 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6w6p\" (UniqueName: \"kubernetes.io/projected/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-kube-api-access-k6w6p\") pod \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\" (UID: \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\") " Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.652394 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-combined-ca-bundle\") pod \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\" (UID: \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\") " Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.652512 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-config-data\") pod \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\" (UID: \"9dedaba8-f1c7-4b13-a5c5-78b2ead1753c\") " Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.668555 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-kube-api-access-k6w6p" (OuterVolumeSpecName: "kube-api-access-k6w6p") pod "9dedaba8-f1c7-4b13-a5c5-78b2ead1753c" (UID: "9dedaba8-f1c7-4b13-a5c5-78b2ead1753c"). InnerVolumeSpecName "kube-api-access-k6w6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.684725 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-config-data" (OuterVolumeSpecName: "config-data") pod "9dedaba8-f1c7-4b13-a5c5-78b2ead1753c" (UID: "9dedaba8-f1c7-4b13-a5c5-78b2ead1753c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.697336 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9dedaba8-f1c7-4b13-a5c5-78b2ead1753c" (UID: "9dedaba8-f1c7-4b13-a5c5-78b2ead1753c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.758779 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6w6p\" (UniqueName: \"kubernetes.io/projected/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-kube-api-access-k6w6p\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.758823 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:41 crc kubenswrapper[4910]: I0105 22:14:41.758837 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.540412 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.541925 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3486557d-93f8-44c2-b40a-dd8aca19d8e1","Type":"ContainerStarted","Data":"9f4f5a94d78ccf55b8b88bf158362b3d9f7fee1d51111812e72271a6887b1360"} Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.542315 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3486557d-93f8-44c2-b40a-dd8aca19d8e1","Type":"ContainerStarted","Data":"a8b2b4d5b559dae71be16c91b6e3ceb8f53c013e2ed93dca2aa9f32d74982c10"} Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.571607 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.571584596 podStartE2EDuration="2.571584596s" podCreationTimestamp="2026-01-05 22:14:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:14:42.559311822 +0000 UTC m=+1414.136809492" watchObservedRunningTime="2026-01-05 22:14:42.571584596 +0000 UTC m=+1414.149082266" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.589790 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.599557 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.614358 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:14:42 crc kubenswrapper[4910]: E0105 22:14:42.614785 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dedaba8-f1c7-4b13-a5c5-78b2ead1753c" containerName="nova-scheduler-scheduler" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.614807 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dedaba8-f1c7-4b13-a5c5-78b2ead1753c" containerName="nova-scheduler-scheduler" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.614988 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dedaba8-f1c7-4b13-a5c5-78b2ead1753c" containerName="nova-scheduler-scheduler" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.615631 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.618476 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.633509 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.679098 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qn2l9\" (UniqueName: \"kubernetes.io/projected/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-kube-api-access-qn2l9\") pod \"nova-scheduler-0\" (UID: \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\") " pod="openstack/nova-scheduler-0" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.679425 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-config-data\") pod \"nova-scheduler-0\" (UID: \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\") " pod="openstack/nova-scheduler-0" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.679810 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\") " pod="openstack/nova-scheduler-0" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.734594 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dedaba8-f1c7-4b13-a5c5-78b2ead1753c" path="/var/lib/kubelet/pods/9dedaba8-f1c7-4b13-a5c5-78b2ead1753c/volumes" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.782345 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\") " pod="openstack/nova-scheduler-0" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.782506 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qn2l9\" (UniqueName: \"kubernetes.io/projected/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-kube-api-access-qn2l9\") pod \"nova-scheduler-0\" (UID: \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\") " pod="openstack/nova-scheduler-0" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.782604 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-config-data\") pod \"nova-scheduler-0\" (UID: \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\") " pod="openstack/nova-scheduler-0" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.790498 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-config-data\") pod \"nova-scheduler-0\" (UID: \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\") " pod="openstack/nova-scheduler-0" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.791260 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: 
\"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\") " pod="openstack/nova-scheduler-0" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.807733 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qn2l9\" (UniqueName: \"kubernetes.io/projected/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-kube-api-access-qn2l9\") pod \"nova-scheduler-0\" (UID: \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\") " pod="openstack/nova-scheduler-0" Jan 05 22:14:42 crc kubenswrapper[4910]: I0105 22:14:42.933866 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 22:14:43 crc kubenswrapper[4910]: I0105 22:14:43.515098 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:14:43 crc kubenswrapper[4910]: W0105 22:14:43.521306 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83319bb4_7278_49b3_8ef2_beb8baa0a1a6.slice/crio-1560d48dd447293696b517aa3d065e515c4b5269315738b62065aa6566e62faa WatchSource:0}: Error finding container 1560d48dd447293696b517aa3d065e515c4b5269315738b62065aa6566e62faa: Status 404 returned error can't find the container with id 1560d48dd447293696b517aa3d065e515c4b5269315738b62065aa6566e62faa Jan 05 22:14:43 crc kubenswrapper[4910]: I0105 22:14:43.559654 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83319bb4-7278-49b3-8ef2-beb8baa0a1a6","Type":"ContainerStarted","Data":"1560d48dd447293696b517aa3d065e515c4b5269315738b62065aa6566e62faa"} Jan 05 22:14:43 crc kubenswrapper[4910]: I0105 22:14:43.930731 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6ps7z"] Jan 05 22:14:43 crc kubenswrapper[4910]: I0105 22:14:43.933108 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:14:43 crc kubenswrapper[4910]: I0105 22:14:43.949083 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6ps7z"] Jan 05 22:14:44 crc kubenswrapper[4910]: I0105 22:14:44.016456 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de5236f-b5ac-4853-a63d-354cae841e76-catalog-content\") pod \"redhat-operators-6ps7z\" (UID: \"0de5236f-b5ac-4853-a63d-354cae841e76\") " pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:14:44 crc kubenswrapper[4910]: I0105 22:14:44.016695 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de5236f-b5ac-4853-a63d-354cae841e76-utilities\") pod \"redhat-operators-6ps7z\" (UID: \"0de5236f-b5ac-4853-a63d-354cae841e76\") " pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:14:44 crc kubenswrapper[4910]: I0105 22:14:44.016804 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhhfn\" (UniqueName: \"kubernetes.io/projected/0de5236f-b5ac-4853-a63d-354cae841e76-kube-api-access-bhhfn\") pod \"redhat-operators-6ps7z\" (UID: \"0de5236f-b5ac-4853-a63d-354cae841e76\") " pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:14:44 crc kubenswrapper[4910]: I0105 22:14:44.118444 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de5236f-b5ac-4853-a63d-354cae841e76-utilities\") pod \"redhat-operators-6ps7z\" (UID: \"0de5236f-b5ac-4853-a63d-354cae841e76\") " pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:14:44 crc kubenswrapper[4910]: I0105 22:14:44.118540 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhhfn\" (UniqueName: \"kubernetes.io/projected/0de5236f-b5ac-4853-a63d-354cae841e76-kube-api-access-bhhfn\") pod \"redhat-operators-6ps7z\" (UID: \"0de5236f-b5ac-4853-a63d-354cae841e76\") " pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:14:44 crc kubenswrapper[4910]: I0105 22:14:44.118580 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de5236f-b5ac-4853-a63d-354cae841e76-catalog-content\") pod \"redhat-operators-6ps7z\" (UID: \"0de5236f-b5ac-4853-a63d-354cae841e76\") " pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:14:44 crc kubenswrapper[4910]: I0105 22:14:44.119056 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de5236f-b5ac-4853-a63d-354cae841e76-catalog-content\") pod \"redhat-operators-6ps7z\" (UID: \"0de5236f-b5ac-4853-a63d-354cae841e76\") " pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:14:44 crc kubenswrapper[4910]: I0105 22:14:44.119346 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de5236f-b5ac-4853-a63d-354cae841e76-utilities\") pod \"redhat-operators-6ps7z\" (UID: \"0de5236f-b5ac-4853-a63d-354cae841e76\") " pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:14:44 crc kubenswrapper[4910]: I0105 22:14:44.135297 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-bhhfn\" (UniqueName: \"kubernetes.io/projected/0de5236f-b5ac-4853-a63d-354cae841e76-kube-api-access-bhhfn\") pod \"redhat-operators-6ps7z\" (UID: \"0de5236f-b5ac-4853-a63d-354cae841e76\") " pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:14:44 crc kubenswrapper[4910]: I0105 22:14:44.257225 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:14:44 crc kubenswrapper[4910]: I0105 22:14:44.578107 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83319bb4-7278-49b3-8ef2-beb8baa0a1a6","Type":"ContainerStarted","Data":"03e2a0482d96bb74144b1ebf3502bf0c9e701db7ab42a851ca5abd53fadbfdf7"} Jan 05 22:14:44 crc kubenswrapper[4910]: I0105 22:14:44.595067 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.595042995 podStartE2EDuration="2.595042995s" podCreationTimestamp="2026-01-05 22:14:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:14:44.593463746 +0000 UTC m=+1416.170961426" watchObservedRunningTime="2026-01-05 22:14:44.595042995 +0000 UTC m=+1416.172540665" Jan 05 22:14:44 crc kubenswrapper[4910]: I0105 22:14:44.750356 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6ps7z"] Jan 05 22:14:44 crc kubenswrapper[4910]: W0105 22:14:44.771249 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0de5236f_b5ac_4853_a63d_354cae841e76.slice/crio-ea5aa53448a1ce5f053ecade92770d4a6d1a21d77d07593cf84eaa56fc7d186b WatchSource:0}: Error finding container ea5aa53448a1ce5f053ecade92770d4a6d1a21d77d07593cf84eaa56fc7d186b: Status 404 returned error can't find the container with id ea5aa53448a1ce5f053ecade92770d4a6d1a21d77d07593cf84eaa56fc7d186b Jan 05 22:14:45 crc kubenswrapper[4910]: I0105 22:14:45.592709 4910 generic.go:334] "Generic (PLEG): container finished" podID="0de5236f-b5ac-4853-a63d-354cae841e76" containerID="de716788637f9d087a8d6995223d51c3f2436e245e306029c6ffe4a6f524ab57" exitCode=0 Jan 05 22:14:45 crc kubenswrapper[4910]: I0105 22:14:45.592791 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6ps7z" event={"ID":"0de5236f-b5ac-4853-a63d-354cae841e76","Type":"ContainerDied","Data":"de716788637f9d087a8d6995223d51c3f2436e245e306029c6ffe4a6f524ab57"} Jan 05 22:14:45 crc kubenswrapper[4910]: I0105 22:14:45.593179 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6ps7z" event={"ID":"0de5236f-b5ac-4853-a63d-354cae841e76","Type":"ContainerStarted","Data":"ea5aa53448a1ce5f053ecade92770d4a6d1a21d77d07593cf84eaa56fc7d186b"} Jan 05 22:14:45 crc kubenswrapper[4910]: I0105 22:14:45.936509 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 05 22:14:45 crc kubenswrapper[4910]: I0105 22:14:45.936572 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 05 22:14:46 crc kubenswrapper[4910]: I0105 22:14:46.858278 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 05 22:14:46 crc kubenswrapper[4910]: I0105 22:14:46.858349 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/nova-api-0" Jan 05 22:14:47 crc kubenswrapper[4910]: I0105 22:14:47.613979 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6ps7z" event={"ID":"0de5236f-b5ac-4853-a63d-354cae841e76","Type":"ContainerStarted","Data":"371da87b9341501197a1cfcc96b13e6f3317fbb7f2c7781fcffdb7eff43c2c51"} Jan 05 22:14:47 crc kubenswrapper[4910]: I0105 22:14:47.874361 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cf7e2b20-58e5-4c61-9e50-c1af51acf521" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.200:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 05 22:14:47 crc kubenswrapper[4910]: I0105 22:14:47.874370 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cf7e2b20-58e5-4c61-9e50-c1af51acf521" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.200:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 05 22:14:47 crc kubenswrapper[4910]: I0105 22:14:47.934974 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 05 22:14:49 crc kubenswrapper[4910]: I0105 22:14:49.638409 4910 generic.go:334] "Generic (PLEG): container finished" podID="0de5236f-b5ac-4853-a63d-354cae841e76" containerID="371da87b9341501197a1cfcc96b13e6f3317fbb7f2c7781fcffdb7eff43c2c51" exitCode=0 Jan 05 22:14:49 crc kubenswrapper[4910]: I0105 22:14:49.638607 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6ps7z" event={"ID":"0de5236f-b5ac-4853-a63d-354cae841e76","Type":"ContainerDied","Data":"371da87b9341501197a1cfcc96b13e6f3317fbb7f2c7781fcffdb7eff43c2c51"} Jan 05 22:14:50 crc kubenswrapper[4910]: I0105 22:14:50.936903 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 05 22:14:50 crc kubenswrapper[4910]: I0105 22:14:50.937303 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 05 22:14:51 crc kubenswrapper[4910]: I0105 22:14:51.957262 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 05 22:14:51 crc kubenswrapper[4910]: I0105 22:14:51.957312 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 05 22:14:52 crc kubenswrapper[4910]: I0105 22:14:52.669190 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6ps7z" event={"ID":"0de5236f-b5ac-4853-a63d-354cae841e76","Type":"ContainerStarted","Data":"9ce25be59c4be4401e084994e93c074c09b4171603fb114dc4d3b6f8423f43a2"} Jan 05 22:14:52 crc kubenswrapper[4910]: I0105 22:14:52.711279 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6ps7z" podStartSLOduration=3.657103152 podStartE2EDuration="9.711250987s" podCreationTimestamp="2026-01-05 22:14:43 +0000 UTC" 
firstStartedPulling="2026-01-05 22:14:45.595628058 +0000 UTC m=+1417.173125728" lastFinishedPulling="2026-01-05 22:14:51.649775893 +0000 UTC m=+1423.227273563" observedRunningTime="2026-01-05 22:14:52.703593927 +0000 UTC m=+1424.281091597" watchObservedRunningTime="2026-01-05 22:14:52.711250987 +0000 UTC m=+1424.288748657" Jan 05 22:14:52 crc kubenswrapper[4910]: I0105 22:14:52.935289 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 05 22:14:52 crc kubenswrapper[4910]: I0105 22:14:52.966467 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 05 22:14:53 crc kubenswrapper[4910]: I0105 22:14:53.709229 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 05 22:14:53 crc kubenswrapper[4910]: I0105 22:14:53.720794 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 05 22:14:54 crc kubenswrapper[4910]: I0105 22:14:54.258557 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:14:54 crc kubenswrapper[4910]: I0105 22:14:54.260840 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:14:55 crc kubenswrapper[4910]: I0105 22:14:55.309222 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6ps7z" podUID="0de5236f-b5ac-4853-a63d-354cae841e76" containerName="registry-server" probeResult="failure" output=< Jan 05 22:14:55 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Jan 05 22:14:55 crc kubenswrapper[4910]: > Jan 05 22:14:56 crc kubenswrapper[4910]: I0105 22:14:56.941932 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 05 22:14:56 crc kubenswrapper[4910]: I0105 22:14:56.942998 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 05 22:14:57 crc kubenswrapper[4910]: I0105 22:14:57.035717 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 05 22:14:57 crc kubenswrapper[4910]: I0105 22:14:57.066625 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 05 22:14:57 crc kubenswrapper[4910]: I0105 22:14:57.745268 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 05 22:14:57 crc kubenswrapper[4910]: I0105 22:14:57.751140 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.156788 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl"] Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.159167 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.161170 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.180373 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.191351 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl"] Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.296234 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5cfd0899-0cb5-4727-9c44-27799c5a2131-config-volume\") pod \"collect-profiles-29460855-j2sjl\" (UID: \"5cfd0899-0cb5-4727-9c44-27799c5a2131\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.296819 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5cfd0899-0cb5-4727-9c44-27799c5a2131-secret-volume\") pod \"collect-profiles-29460855-j2sjl\" (UID: \"5cfd0899-0cb5-4727-9c44-27799c5a2131\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.297008 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zf6wg\" (UniqueName: \"kubernetes.io/projected/5cfd0899-0cb5-4727-9c44-27799c5a2131-kube-api-access-zf6wg\") pod \"collect-profiles-29460855-j2sjl\" (UID: \"5cfd0899-0cb5-4727-9c44-27799c5a2131\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.399467 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5cfd0899-0cb5-4727-9c44-27799c5a2131-secret-volume\") pod \"collect-profiles-29460855-j2sjl\" (UID: \"5cfd0899-0cb5-4727-9c44-27799c5a2131\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.399566 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zf6wg\" (UniqueName: \"kubernetes.io/projected/5cfd0899-0cb5-4727-9c44-27799c5a2131-kube-api-access-zf6wg\") pod \"collect-profiles-29460855-j2sjl\" (UID: \"5cfd0899-0cb5-4727-9c44-27799c5a2131\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.399661 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5cfd0899-0cb5-4727-9c44-27799c5a2131-config-volume\") pod \"collect-profiles-29460855-j2sjl\" (UID: \"5cfd0899-0cb5-4727-9c44-27799c5a2131\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.400793 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5cfd0899-0cb5-4727-9c44-27799c5a2131-config-volume\") pod 
\"collect-profiles-29460855-j2sjl\" (UID: \"5cfd0899-0cb5-4727-9c44-27799c5a2131\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.411171 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5cfd0899-0cb5-4727-9c44-27799c5a2131-secret-volume\") pod \"collect-profiles-29460855-j2sjl\" (UID: \"5cfd0899-0cb5-4727-9c44-27799c5a2131\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.421391 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zf6wg\" (UniqueName: \"kubernetes.io/projected/5cfd0899-0cb5-4727-9c44-27799c5a2131-kube-api-access-zf6wg\") pod \"collect-profiles-29460855-j2sjl\" (UID: \"5cfd0899-0cb5-4727-9c44-27799c5a2131\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.503797 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.941581 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.942047 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.948600 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.948673 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 05 22:15:00 crc kubenswrapper[4910]: I0105 22:15:00.980761 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl"] Jan 05 22:15:01 crc kubenswrapper[4910]: I0105 22:15:01.806547 4910 generic.go:334] "Generic (PLEG): container finished" podID="5cfd0899-0cb5-4727-9c44-27799c5a2131" containerID="5b446a829202ed8061be13c123b2bf6669781ba6ae975a5b2f50d239038094da" exitCode=0 Jan 05 22:15:01 crc kubenswrapper[4910]: I0105 22:15:01.806653 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" event={"ID":"5cfd0899-0cb5-4727-9c44-27799c5a2131","Type":"ContainerDied","Data":"5b446a829202ed8061be13c123b2bf6669781ba6ae975a5b2f50d239038094da"} Jan 05 22:15:01 crc kubenswrapper[4910]: I0105 22:15:01.806979 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" event={"ID":"5cfd0899-0cb5-4727-9c44-27799c5a2131","Type":"ContainerStarted","Data":"d92dc866898bd25611be736b79fc13d267a3a2e368ddefba1d3deec097b0690a"} Jan 05 22:15:03 crc kubenswrapper[4910]: I0105 22:15:03.124428 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" Jan 05 22:15:03 crc kubenswrapper[4910]: I0105 22:15:03.276360 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zf6wg\" (UniqueName: \"kubernetes.io/projected/5cfd0899-0cb5-4727-9c44-27799c5a2131-kube-api-access-zf6wg\") pod \"5cfd0899-0cb5-4727-9c44-27799c5a2131\" (UID: \"5cfd0899-0cb5-4727-9c44-27799c5a2131\") " Jan 05 22:15:03 crc kubenswrapper[4910]: I0105 22:15:03.276427 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5cfd0899-0cb5-4727-9c44-27799c5a2131-config-volume\") pod \"5cfd0899-0cb5-4727-9c44-27799c5a2131\" (UID: \"5cfd0899-0cb5-4727-9c44-27799c5a2131\") " Jan 05 22:15:03 crc kubenswrapper[4910]: I0105 22:15:03.276492 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5cfd0899-0cb5-4727-9c44-27799c5a2131-secret-volume\") pod \"5cfd0899-0cb5-4727-9c44-27799c5a2131\" (UID: \"5cfd0899-0cb5-4727-9c44-27799c5a2131\") " Jan 05 22:15:03 crc kubenswrapper[4910]: I0105 22:15:03.278633 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5cfd0899-0cb5-4727-9c44-27799c5a2131-config-volume" (OuterVolumeSpecName: "config-volume") pod "5cfd0899-0cb5-4727-9c44-27799c5a2131" (UID: "5cfd0899-0cb5-4727-9c44-27799c5a2131"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:03 crc kubenswrapper[4910]: I0105 22:15:03.282442 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5cfd0899-0cb5-4727-9c44-27799c5a2131-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5cfd0899-0cb5-4727-9c44-27799c5a2131" (UID: "5cfd0899-0cb5-4727-9c44-27799c5a2131"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:03 crc kubenswrapper[4910]: I0105 22:15:03.282900 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cfd0899-0cb5-4727-9c44-27799c5a2131-kube-api-access-zf6wg" (OuterVolumeSpecName: "kube-api-access-zf6wg") pod "5cfd0899-0cb5-4727-9c44-27799c5a2131" (UID: "5cfd0899-0cb5-4727-9c44-27799c5a2131"). InnerVolumeSpecName "kube-api-access-zf6wg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:03 crc kubenswrapper[4910]: I0105 22:15:03.379137 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zf6wg\" (UniqueName: \"kubernetes.io/projected/5cfd0899-0cb5-4727-9c44-27799c5a2131-kube-api-access-zf6wg\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:03 crc kubenswrapper[4910]: I0105 22:15:03.379188 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5cfd0899-0cb5-4727-9c44-27799c5a2131-config-volume\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:03 crc kubenswrapper[4910]: I0105 22:15:03.379205 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5cfd0899-0cb5-4727-9c44-27799c5a2131-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:03 crc kubenswrapper[4910]: I0105 22:15:03.834412 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" event={"ID":"5cfd0899-0cb5-4727-9c44-27799c5a2131","Type":"ContainerDied","Data":"d92dc866898bd25611be736b79fc13d267a3a2e368ddefba1d3deec097b0690a"} Jan 05 22:15:03 crc kubenswrapper[4910]: I0105 22:15:03.834990 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d92dc866898bd25611be736b79fc13d267a3a2e368ddefba1d3deec097b0690a" Jan 05 22:15:03 crc kubenswrapper[4910]: I0105 22:15:03.834489 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl" Jan 05 22:15:05 crc kubenswrapper[4910]: I0105 22:15:05.311427 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6ps7z" podUID="0de5236f-b5ac-4853-a63d-354cae841e76" containerName="registry-server" probeResult="failure" output=< Jan 05 22:15:05 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Jan 05 22:15:05 crc kubenswrapper[4910]: > Jan 05 22:15:10 crc kubenswrapper[4910]: I0105 22:15:10.952199 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:15:10 crc kubenswrapper[4910]: I0105 22:15:10.952868 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:15:14 crc kubenswrapper[4910]: I0105 22:15:14.318563 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:15:14 crc kubenswrapper[4910]: I0105 22:15:14.375768 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:15:15 crc kubenswrapper[4910]: I0105 22:15:15.137889 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6ps7z"] Jan 05 22:15:15 crc kubenswrapper[4910]: I0105 22:15:15.953769 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6ps7z" 
podUID="0de5236f-b5ac-4853-a63d-354cae841e76" containerName="registry-server" containerID="cri-o://9ce25be59c4be4401e084994e93c074c09b4171603fb114dc4d3b6f8423f43a2" gracePeriod=2 Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.430909 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.539708 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de5236f-b5ac-4853-a63d-354cae841e76-catalog-content\") pod \"0de5236f-b5ac-4853-a63d-354cae841e76\" (UID: \"0de5236f-b5ac-4853-a63d-354cae841e76\") " Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.539972 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhhfn\" (UniqueName: \"kubernetes.io/projected/0de5236f-b5ac-4853-a63d-354cae841e76-kube-api-access-bhhfn\") pod \"0de5236f-b5ac-4853-a63d-354cae841e76\" (UID: \"0de5236f-b5ac-4853-a63d-354cae841e76\") " Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.540077 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de5236f-b5ac-4853-a63d-354cae841e76-utilities\") pod \"0de5236f-b5ac-4853-a63d-354cae841e76\" (UID: \"0de5236f-b5ac-4853-a63d-354cae841e76\") " Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.540818 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0de5236f-b5ac-4853-a63d-354cae841e76-utilities" (OuterVolumeSpecName: "utilities") pod "0de5236f-b5ac-4853-a63d-354cae841e76" (UID: "0de5236f-b5ac-4853-a63d-354cae841e76"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.546324 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0de5236f-b5ac-4853-a63d-354cae841e76-kube-api-access-bhhfn" (OuterVolumeSpecName: "kube-api-access-bhhfn") pod "0de5236f-b5ac-4853-a63d-354cae841e76" (UID: "0de5236f-b5ac-4853-a63d-354cae841e76"). InnerVolumeSpecName "kube-api-access-bhhfn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.642485 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhhfn\" (UniqueName: \"kubernetes.io/projected/0de5236f-b5ac-4853-a63d-354cae841e76-kube-api-access-bhhfn\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.642518 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de5236f-b5ac-4853-a63d-354cae841e76-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.650976 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0de5236f-b5ac-4853-a63d-354cae841e76-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0de5236f-b5ac-4853-a63d-354cae841e76" (UID: "0de5236f-b5ac-4853-a63d-354cae841e76"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.744315 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de5236f-b5ac-4853-a63d-354cae841e76-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.966512 4910 generic.go:334] "Generic (PLEG): container finished" podID="0de5236f-b5ac-4853-a63d-354cae841e76" containerID="9ce25be59c4be4401e084994e93c074c09b4171603fb114dc4d3b6f8423f43a2" exitCode=0 Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.966569 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6ps7z" event={"ID":"0de5236f-b5ac-4853-a63d-354cae841e76","Type":"ContainerDied","Data":"9ce25be59c4be4401e084994e93c074c09b4171603fb114dc4d3b6f8423f43a2"} Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.966613 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6ps7z" event={"ID":"0de5236f-b5ac-4853-a63d-354cae841e76","Type":"ContainerDied","Data":"ea5aa53448a1ce5f053ecade92770d4a6d1a21d77d07593cf84eaa56fc7d186b"} Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.966624 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6ps7z" Jan 05 22:15:16 crc kubenswrapper[4910]: I0105 22:15:16.966642 4910 scope.go:117] "RemoveContainer" containerID="9ce25be59c4be4401e084994e93c074c09b4171603fb114dc4d3b6f8423f43a2" Jan 05 22:15:17 crc kubenswrapper[4910]: I0105 22:15:16.999069 4910 scope.go:117] "RemoveContainer" containerID="371da87b9341501197a1cfcc96b13e6f3317fbb7f2c7781fcffdb7eff43c2c51" Jan 05 22:15:17 crc kubenswrapper[4910]: I0105 22:15:17.003833 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6ps7z"] Jan 05 22:15:17 crc kubenswrapper[4910]: I0105 22:15:17.014232 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6ps7z"] Jan 05 22:15:17 crc kubenswrapper[4910]: I0105 22:15:17.023192 4910 scope.go:117] "RemoveContainer" containerID="de716788637f9d087a8d6995223d51c3f2436e245e306029c6ffe4a6f524ab57" Jan 05 22:15:17 crc kubenswrapper[4910]: I0105 22:15:17.075153 4910 scope.go:117] "RemoveContainer" containerID="9ce25be59c4be4401e084994e93c074c09b4171603fb114dc4d3b6f8423f43a2" Jan 05 22:15:17 crc kubenswrapper[4910]: E0105 22:15:17.076335 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ce25be59c4be4401e084994e93c074c09b4171603fb114dc4d3b6f8423f43a2\": container with ID starting with 9ce25be59c4be4401e084994e93c074c09b4171603fb114dc4d3b6f8423f43a2 not found: ID does not exist" containerID="9ce25be59c4be4401e084994e93c074c09b4171603fb114dc4d3b6f8423f43a2" Jan 05 22:15:17 crc kubenswrapper[4910]: I0105 22:15:17.076381 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ce25be59c4be4401e084994e93c074c09b4171603fb114dc4d3b6f8423f43a2"} err="failed to get container status \"9ce25be59c4be4401e084994e93c074c09b4171603fb114dc4d3b6f8423f43a2\": rpc error: code = NotFound desc = could not find container \"9ce25be59c4be4401e084994e93c074c09b4171603fb114dc4d3b6f8423f43a2\": container with ID starting with 9ce25be59c4be4401e084994e93c074c09b4171603fb114dc4d3b6f8423f43a2 not found: ID does not exist" Jan 05 22:15:17 crc 
kubenswrapper[4910]: I0105 22:15:17.076414 4910 scope.go:117] "RemoveContainer" containerID="371da87b9341501197a1cfcc96b13e6f3317fbb7f2c7781fcffdb7eff43c2c51" Jan 05 22:15:17 crc kubenswrapper[4910]: E0105 22:15:17.076853 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"371da87b9341501197a1cfcc96b13e6f3317fbb7f2c7781fcffdb7eff43c2c51\": container with ID starting with 371da87b9341501197a1cfcc96b13e6f3317fbb7f2c7781fcffdb7eff43c2c51 not found: ID does not exist" containerID="371da87b9341501197a1cfcc96b13e6f3317fbb7f2c7781fcffdb7eff43c2c51" Jan 05 22:15:17 crc kubenswrapper[4910]: I0105 22:15:17.076901 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"371da87b9341501197a1cfcc96b13e6f3317fbb7f2c7781fcffdb7eff43c2c51"} err="failed to get container status \"371da87b9341501197a1cfcc96b13e6f3317fbb7f2c7781fcffdb7eff43c2c51\": rpc error: code = NotFound desc = could not find container \"371da87b9341501197a1cfcc96b13e6f3317fbb7f2c7781fcffdb7eff43c2c51\": container with ID starting with 371da87b9341501197a1cfcc96b13e6f3317fbb7f2c7781fcffdb7eff43c2c51 not found: ID does not exist" Jan 05 22:15:17 crc kubenswrapper[4910]: I0105 22:15:17.076934 4910 scope.go:117] "RemoveContainer" containerID="de716788637f9d087a8d6995223d51c3f2436e245e306029c6ffe4a6f524ab57" Jan 05 22:15:17 crc kubenswrapper[4910]: E0105 22:15:17.077728 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de716788637f9d087a8d6995223d51c3f2436e245e306029c6ffe4a6f524ab57\": container with ID starting with de716788637f9d087a8d6995223d51c3f2436e245e306029c6ffe4a6f524ab57 not found: ID does not exist" containerID="de716788637f9d087a8d6995223d51c3f2436e245e306029c6ffe4a6f524ab57" Jan 05 22:15:17 crc kubenswrapper[4910]: I0105 22:15:17.077756 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de716788637f9d087a8d6995223d51c3f2436e245e306029c6ffe4a6f524ab57"} err="failed to get container status \"de716788637f9d087a8d6995223d51c3f2436e245e306029c6ffe4a6f524ab57\": rpc error: code = NotFound desc = could not find container \"de716788637f9d087a8d6995223d51c3f2436e245e306029c6ffe4a6f524ab57\": container with ID starting with de716788637f9d087a8d6995223d51c3f2436e245e306029c6ffe4a6f524ab57 not found: ID does not exist" Jan 05 22:15:18 crc kubenswrapper[4910]: I0105 22:15:18.736207 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0de5236f-b5ac-4853-a63d-354cae841e76" path="/var/lib/kubelet/pods/0de5236f-b5ac-4853-a63d-354cae841e76/volumes" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.251596 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-lnk9j"] Jan 05 22:15:19 crc kubenswrapper[4910]: E0105 22:15:19.257689 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cfd0899-0cb5-4727-9c44-27799c5a2131" containerName="collect-profiles" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.257993 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cfd0899-0cb5-4727-9c44-27799c5a2131" containerName="collect-profiles" Jan 05 22:15:19 crc kubenswrapper[4910]: E0105 22:15:19.258104 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de5236f-b5ac-4853-a63d-354cae841e76" containerName="registry-server" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.258214 4910 
state_mem.go:107] "Deleted CPUSet assignment" podUID="0de5236f-b5ac-4853-a63d-354cae841e76" containerName="registry-server" Jan 05 22:15:19 crc kubenswrapper[4910]: E0105 22:15:19.258321 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de5236f-b5ac-4853-a63d-354cae841e76" containerName="extract-utilities" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.258405 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de5236f-b5ac-4853-a63d-354cae841e76" containerName="extract-utilities" Jan 05 22:15:19 crc kubenswrapper[4910]: E0105 22:15:19.258520 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de5236f-b5ac-4853-a63d-354cae841e76" containerName="extract-content" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.258605 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de5236f-b5ac-4853-a63d-354cae841e76" containerName="extract-content" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.258939 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="0de5236f-b5ac-4853-a63d-354cae841e76" containerName="registry-server" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.266491 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5cfd0899-0cb5-4727-9c44-27799c5a2131" containerName="collect-profiles" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.267574 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-lnk9j" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.276944 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.318830 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-lnk9j"] Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.378748 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-787f96fcd6-44r4b"] Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.380899 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.454822 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bjtk\" (UniqueName: \"kubernetes.io/projected/f509687a-bb68-4247-b4de-0f0cb99ca389-kube-api-access-8bjtk\") pod \"root-account-create-update-lnk9j\" (UID: \"f509687a-bb68-4247-b4de-0f0cb99ca389\") " pod="openstack/root-account-create-update-lnk9j" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.455140 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts\") pod \"root-account-create-update-lnk9j\" (UID: \"f509687a-bb68-4247-b4de-0f0cb99ca389\") " pod="openstack/root-account-create-update-lnk9j" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.459676 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-787f96fcd6-44r4b"] Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.566439 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-combined-ca-bundle\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.566582 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-config-data-custom\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.566642 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nncgv\" (UniqueName: \"kubernetes.io/projected/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-kube-api-access-nncgv\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.566685 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bjtk\" (UniqueName: \"kubernetes.io/projected/f509687a-bb68-4247-b4de-0f0cb99ca389-kube-api-access-8bjtk\") pod \"root-account-create-update-lnk9j\" (UID: \"f509687a-bb68-4247-b4de-0f0cb99ca389\") " pod="openstack/root-account-create-update-lnk9j" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.566746 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-logs\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.566774 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts\") pod 
\"root-account-create-update-lnk9j\" (UID: \"f509687a-bb68-4247-b4de-0f0cb99ca389\") " pod="openstack/root-account-create-update-lnk9j" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.566840 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-config-data\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.568363 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts\") pod \"root-account-create-update-lnk9j\" (UID: \"f509687a-bb68-4247-b4de-0f0cb99ca389\") " pod="openstack/root-account-create-update-lnk9j" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.670442 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-combined-ca-bundle\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.670527 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-config-data-custom\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.670563 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nncgv\" (UniqueName: \"kubernetes.io/projected/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-kube-api-access-nncgv\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.670599 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-logs\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.670643 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-config-data\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.672639 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-d6c5d94b9-llc4f"] Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.674420 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.678980 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-logs\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.686720 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-d6c5d94b9-llc4f"] Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.690614 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-combined-ca-bundle\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.730567 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-config-data\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.736807 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-config-data-custom\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.758717 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nncgv\" (UniqueName: \"kubernetes.io/projected/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-kube-api-access-nncgv\") pod \"barbican-keystone-listener-787f96fcd6-44r4b\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.772491 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-config-data\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.772582 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-combined-ca-bundle\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.772628 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc875\" (UniqueName: \"kubernetes.io/projected/dc0e5b95-8658-440f-8771-c67a74098057-kube-api-access-gc875\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 
22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.772730 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc0e5b95-8658-440f-8771-c67a74098057-logs\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.772799 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-config-data-custom\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.785947 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bjtk\" (UniqueName: \"kubernetes.io/projected/f509687a-bb68-4247-b4de-0f0cb99ca389-kube-api-access-8bjtk\") pod \"root-account-create-update-lnk9j\" (UID: \"f509687a-bb68-4247-b4de-0f0cb99ca389\") " pod="openstack/root-account-create-update-lnk9j" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.787793 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-2c36-account-create-update-777vv"] Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.795606 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-2c36-account-create-update-777vv" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.814524 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.832521 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-2c36-account-create-update-777vv"] Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.872288 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-1a42-account-create-update-4vz6m"] Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.874747 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-1a42-account-create-update-4vz6m" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.878681 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-config-data\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.878782 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-combined-ca-bundle\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.878834 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gc875\" (UniqueName: \"kubernetes.io/projected/dc0e5b95-8658-440f-8771-c67a74098057-kube-api-access-gc875\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.878891 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04ef3843-8448-4842-aaf3-7e2bcc428122-operator-scripts\") pod \"glance-2c36-account-create-update-777vv\" (UID: \"04ef3843-8448-4842-aaf3-7e2bcc428122\") " pod="openstack/glance-2c36-account-create-update-777vv" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.878952 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvn7m\" (UniqueName: \"kubernetes.io/projected/04ef3843-8448-4842-aaf3-7e2bcc428122-kube-api-access-dvn7m\") pod \"glance-2c36-account-create-update-777vv\" (UID: \"04ef3843-8448-4842-aaf3-7e2bcc428122\") " pod="openstack/glance-2c36-account-create-update-777vv" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.879006 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc0e5b95-8658-440f-8771-c67a74098057-logs\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.879081 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-config-data-custom\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.888075 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-config-data-custom\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.902618 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-lnk9j" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.903898 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc0e5b95-8658-440f-8771-c67a74098057-logs\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.910380 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.912279 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-config-data\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.913554 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-combined-ca-bundle\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.952511 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6b8d97d96d-jbcrk"] Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.954280 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.983765 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc875\" (UniqueName: \"kubernetes.io/projected/dc0e5b95-8658-440f-8771-c67a74098057-kube-api-access-gc875\") pod \"barbican-worker-d6c5d94b9-llc4f\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.996958 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04ef3843-8448-4842-aaf3-7e2bcc428122-operator-scripts\") pod \"glance-2c36-account-create-update-777vv\" (UID: \"04ef3843-8448-4842-aaf3-7e2bcc428122\") " pod="openstack/glance-2c36-account-create-update-777vv" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.997039 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvn7m\" (UniqueName: \"kubernetes.io/projected/04ef3843-8448-4842-aaf3-7e2bcc428122-kube-api-access-dvn7m\") pod \"glance-2c36-account-create-update-777vv\" (UID: \"04ef3843-8448-4842-aaf3-7e2bcc428122\") " pod="openstack/glance-2c36-account-create-update-777vv" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.997169 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e63178b0-da1f-4d9c-b680-9fdddcd51b9a-operator-scripts\") pod \"barbican-1a42-account-create-update-4vz6m\" (UID: \"e63178b0-da1f-4d9c-b680-9fdddcd51b9a\") " pod="openstack/barbican-1a42-account-create-update-4vz6m" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.997237 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kube-api-access-wcphr\" (UniqueName: \"kubernetes.io/projected/e63178b0-da1f-4d9c-b680-9fdddcd51b9a-kube-api-access-wcphr\") pod \"barbican-1a42-account-create-update-4vz6m\" (UID: \"e63178b0-da1f-4d9c-b680-9fdddcd51b9a\") " pod="openstack/barbican-1a42-account-create-update-4vz6m" Jan 05 22:15:19 crc kubenswrapper[4910]: I0105 22:15:19.998065 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04ef3843-8448-4842-aaf3-7e2bcc428122-operator-scripts\") pod \"glance-2c36-account-create-update-777vv\" (UID: \"04ef3843-8448-4842-aaf3-7e2bcc428122\") " pod="openstack/glance-2c36-account-create-update-777vv" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.025272 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1a42-account-create-update-4vz6m"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.043171 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.045632 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6b8d97d96d-jbcrk"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.055435 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.063182 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvn7m\" (UniqueName: \"kubernetes.io/projected/04ef3843-8448-4842-aaf3-7e2bcc428122-kube-api-access-dvn7m\") pod \"glance-2c36-account-create-update-777vv\" (UID: \"04ef3843-8448-4842-aaf3-7e2bcc428122\") " pod="openstack/glance-2c36-account-create-update-777vv" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.092424 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-2c36-account-create-update-777vv" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.099208 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-zzbhs"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.104978 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcphr\" (UniqueName: \"kubernetes.io/projected/e63178b0-da1f-4d9c-b680-9fdddcd51b9a-kube-api-access-wcphr\") pod \"barbican-1a42-account-create-update-4vz6m\" (UID: \"e63178b0-da1f-4d9c-b680-9fdddcd51b9a\") " pod="openstack/barbican-1a42-account-create-update-4vz6m" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.105241 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-public-tls-certs\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.105379 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.105705 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-internal-tls-certs\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.105798 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-combined-ca-bundle\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.105909 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data-custom\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.106007 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-logs\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.106099 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtrfx\" (UniqueName: \"kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 
05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.106239 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e63178b0-da1f-4d9c-b680-9fdddcd51b9a-operator-scripts\") pod \"barbican-1a42-account-create-update-4vz6m\" (UID: \"e63178b0-da1f-4d9c-b680-9fdddcd51b9a\") " pod="openstack/barbican-1a42-account-create-update-4vz6m" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.107202 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e63178b0-da1f-4d9c-b680-9fdddcd51b9a-operator-scripts\") pod \"barbican-1a42-account-create-update-4vz6m\" (UID: \"e63178b0-da1f-4d9c-b680-9fdddcd51b9a\") " pod="openstack/barbican-1a42-account-create-update-4vz6m" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.171422 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.206211 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcphr\" (UniqueName: \"kubernetes.io/projected/e63178b0-da1f-4d9c-b680-9fdddcd51b9a-kube-api-access-wcphr\") pod \"barbican-1a42-account-create-update-4vz6m\" (UID: \"e63178b0-da1f-4d9c-b680-9fdddcd51b9a\") " pod="openstack/barbican-1a42-account-create-update-4vz6m" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.211745 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-public-tls-certs\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.211868 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.215834 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-internal-tls-certs\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.215950 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-combined-ca-bundle\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.216080 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data-custom\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.216155 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-logs\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.216236 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtrfx\" (UniqueName: \"kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.228317 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-zzbhs"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.230067 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-logs\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.231170 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.246597 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data-custom\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.250230 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-combined-ca-bundle\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.261301 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-internal-tls-certs\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.269143 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-public-tls-certs\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: E0105 22:15:20.277033 4910 projected.go:194] Error preparing data for projected volume kube-api-access-jtrfx for pod openstack/barbican-api-6b8d97d96d-jbcrk: failed to fetch token: serviceaccounts "barbican-barbican" not found Jan 05 22:15:20 crc kubenswrapper[4910]: E0105 22:15:20.277508 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx 
podName:3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:20.777483858 +0000 UTC m=+1452.354981528 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-jtrfx" (UniqueName: "kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx") pod "barbican-api-6b8d97d96d-jbcrk" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69") : failed to fetch token: serviceaccounts "barbican-barbican" not found Jan 05 22:15:20 crc kubenswrapper[4910]: E0105 22:15:20.331637 4910 secret.go:188] Couldn't get secret openstack/barbican-config-data: secret "barbican-config-data" not found Jan 05 22:15:20 crc kubenswrapper[4910]: E0105 22:15:20.331702 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data podName:3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:20.831684263 +0000 UTC m=+1452.409181933 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data") pod "barbican-api-6b8d97d96d-jbcrk" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69") : secret "barbican-config-data" not found Jan 05 22:15:20 crc kubenswrapper[4910]: E0105 22:15:20.335507 4910 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 05 22:15:20 crc kubenswrapper[4910]: E0105 22:15:20.335573 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data podName:7e2a3efd-2de7-493e-af91-900b224e5313 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:20.835550039 +0000 UTC m=+1452.413047709 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data") pod "rabbitmq-server-0" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313") : configmap "rabbitmq-config-data" not found Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.409549 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-2c36-account-create-update-x4kcc"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.431253 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.431525 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a" containerName="openstackclient" containerID="cri-o://b0963465056d457d77ba82b41400764fc535aa116bd7004b3c5f6069bc02b174" gracePeriod=2 Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.452530 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-1a42-account-create-update-4vz6m" Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.472819 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-2c36-account-create-update-x4kcc"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.477468 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.511194 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-sqdcz"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.511494 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-sqdcz" podUID="266ffadc-b889-4089-9779-c64623269d42" containerName="openstack-network-exporter" containerID="cri-o://0aed0283be1d2d7717625b9ca57d441f05965d2b141a4e5d7c184eead1f9c999" gracePeriod=30 Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.524105 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-9g2kt"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.547478 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-cfp97"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.569180 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-1a42-account-create-update-4mgz8"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.600260 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-jn5f9"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.640732 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-jn5f9"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.709215 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-1a42-account-create-update-4mgz8"] Jan 05 22:15:20 crc kubenswrapper[4910]: I0105 22:15:20.878098 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtrfx\" (UniqueName: \"kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:20 crc kubenswrapper[4910]: E0105 22:15:20.879625 4910 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 05 22:15:20 crc kubenswrapper[4910]: E0105 22:15:20.879788 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data podName:7e2a3efd-2de7-493e-af91-900b224e5313 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:21.879769286 +0000 UTC m=+1453.457266956 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data") pod "rabbitmq-server-0" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313") : configmap "rabbitmq-config-data" not found Jan 05 22:15:20 crc kubenswrapper[4910]: E0105 22:15:20.880313 4910 secret.go:188] Couldn't get secret openstack/barbican-config-data: secret "barbican-config-data" not found Jan 05 22:15:20 crc kubenswrapper[4910]: E0105 22:15:20.880412 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data podName:3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:21.880403662 +0000 UTC m=+1453.457901332 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data") pod "barbican-api-6b8d97d96d-jbcrk" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69") : secret "barbican-config-data" not found Jan 05 22:15:20 crc kubenswrapper[4910]: E0105 22:15:20.887233 4910 projected.go:194] Error preparing data for projected volume kube-api-access-jtrfx for pod openstack/barbican-api-6b8d97d96d-jbcrk: failed to fetch token: serviceaccounts "barbican-barbican" not found Jan 05 22:15:20 crc kubenswrapper[4910]: E0105 22:15:20.887321 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx podName:3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:21.887298063 +0000 UTC m=+1453.464795733 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-jtrfx" (UniqueName: "kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx") pod "barbican-api-6b8d97d96d-jbcrk" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69") : failed to fetch token: serviceaccounts "barbican-barbican" not found Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.050227 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-sqdcz_266ffadc-b889-4089-9779-c64623269d42/openstack-network-exporter/0.log" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.050284 4910 generic.go:334] "Generic (PLEG): container finished" podID="266ffadc-b889-4089-9779-c64623269d42" containerID="0aed0283be1d2d7717625b9ca57d441f05965d2b141a4e5d7c184eead1f9c999" exitCode=2 Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.067035 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5129c20f-7129-4dde-b59e-3d2d291c87c1" path="/var/lib/kubelet/pods/5129c20f-7129-4dde-b59e-3d2d291c87c1/volumes" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.068003 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="726618d0-e442-410e-87df-33bca2cf52a4" path="/var/lib/kubelet/pods/726618d0-e442-410e-87df-33bca2cf52a4/volumes" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.068785 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf38198b-8f7d-4853-ba3d-e1968aa3a284" path="/var/lib/kubelet/pods/bf38198b-8f7d-4853-ba3d-e1968aa3a284/volumes" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.069535 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea248b17-90e3-464f-803b-a95ec269a2ad" path="/var/lib/kubelet/pods/ea248b17-90e3-464f-803b-a95ec269a2ad/volumes" Jan 05 
22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.071510 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-sqdcz" event={"ID":"266ffadc-b889-4089-9779-c64623269d42","Type":"ContainerDied","Data":"0aed0283be1d2d7717625b9ca57d441f05965d2b141a4e5d7c184eead1f9c999"} Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.071555 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-4765c"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.071580 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-b0f9-account-create-update-f5pmg"] Jan 05 22:15:21 crc kubenswrapper[4910]: E0105 22:15:21.073862 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a" containerName="openstackclient" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.073889 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a" containerName="openstackclient" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.074181 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a" containerName="openstackclient" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.075003 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-4765c"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.075030 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-b0f9-account-create-update-f5pmg"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.075045 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-75wjt"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.076506 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-b0f9-account-create-update-jn9k7"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.076534 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-75wjt"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.076553 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-b0f9-account-create-update-jn9k7"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.076567 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.076585 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-dgttl"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.076609 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-dgttl"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.076623 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-941b-account-create-update-rj8nc"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.076637 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-941b-account-create-update-rj8nc"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.076649 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-398d-account-create-update-7q6nt"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.076933 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="f817e58c-a8aa-4f0d-8486-153659100a11" containerName="ovn-northd" 
containerID="cri-o://775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf" gracePeriod=30 Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.077182 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b0f9-account-create-update-f5pmg" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.080016 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="f817e58c-a8aa-4f0d-8486-153659100a11" containerName="openstack-network-exporter" containerID="cri-o://6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627" gracePeriod=30 Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.088658 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.100112 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-398d-account-create-update-7q6nt"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.120335 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-8k5dk"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.144800 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.145433 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" containerName="cinder-scheduler" containerID="cri-o://aa48fa221aab1fca8baf355d7d5b238e363506882c0807675e58c0556680cf81" gracePeriod=30 Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.145584 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" containerName="probe" containerID="cri-o://e56637041d9755fd6fda8b6ee2207de4c4a054e4001e101db38b784bf6a8eb7a" gracePeriod=30 Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.159436 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-8k5dk"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.175094 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.175872 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" containerName="openstack-network-exporter" containerID="cri-o://5e44050096e20cb6a25794fd8f53d149477e83b2422b09ae2b60db07b3bfa76e" gracePeriod=300 Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.188537 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgdr2\" (UniqueName: \"kubernetes.io/projected/8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07-kube-api-access-dgdr2\") pod \"nova-api-b0f9-account-create-update-f5pmg\" (UID: \"8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07\") " pod="openstack/nova-api-b0f9-account-create-update-f5pmg" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.188727 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07-operator-scripts\") pod \"nova-api-b0f9-account-create-update-f5pmg\" (UID: \"8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07\") " 
pod="openstack/nova-api-b0f9-account-create-update-f5pmg" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.201537 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.202281 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="c6909118-b0ce-402c-8bb4-7ce665250739" containerName="openstack-network-exporter" containerID="cri-o://59360022044ca27d9ee5757033d8f3c5a80aee6675d87823287602295063b5ec" gracePeriod=300 Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.211951 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.212347 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="07efd759-c536-425d-938e-a8ccd41706cd" containerName="cinder-api-log" containerID="cri-o://6daa2eb7900c845da95b4889f00144bf520b49eeafeeefc6d62129f8760b3df1" gracePeriod=30 Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.213112 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="07efd759-c536-425d-938e-a8ccd41706cd" containerName="cinder-api" containerID="cri-o://a0e248b48425380302b1988bb335f1102fb9d344cce326d7af9e5dd2f6475bc5" gracePeriod=30 Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.231462 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-kjj9c"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.259151 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-524a-account-create-update-d5t9q"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.278701 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-524a-account-create-update-d5t9q"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.291838 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07-operator-scripts\") pod \"nova-api-b0f9-account-create-update-f5pmg\" (UID: \"8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07\") " pod="openstack/nova-api-b0f9-account-create-update-f5pmg" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.291953 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgdr2\" (UniqueName: \"kubernetes.io/projected/8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07-kube-api-access-dgdr2\") pod \"nova-api-b0f9-account-create-update-f5pmg\" (UID: \"8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07\") " pod="openstack/nova-api-b0f9-account-create-update-f5pmg" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.293141 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07-operator-scripts\") pod \"nova-api-b0f9-account-create-update-f5pmg\" (UID: \"8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07\") " pod="openstack/nova-api-b0f9-account-create-update-f5pmg" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.301945 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-ktvmp"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.320497 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgdr2\" (UniqueName: 
\"kubernetes.io/projected/8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07-kube-api-access-dgdr2\") pod \"nova-api-b0f9-account-create-update-f5pmg\" (UID: \"8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07\") " pod="openstack/nova-api-b0f9-account-create-update-f5pmg" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.322613 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-kjj9c"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.337060 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-ktvmp"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.349676 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" containerName="ovsdbserver-sb" containerID="cri-o://ae292dd58468a3ca3fe41cf2714cbea7c847466d8e283f8fbe34e48f2fc358f9" gracePeriod=300 Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.351319 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-mdgvq"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.366291 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6c69d8c8f7-7w2gb"] Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.366792 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6c69d8c8f7-7w2gb" podUID="227b48c0-2e23-4048-8fb5-21628bd9e5e0" containerName="neutron-api" containerID="cri-o://5e9cd39ea8845a5fd2c6e7c0fe1c864ac551845861a02b8b20ce5e8da8cd01fb" gracePeriod=30 Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.367245 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6c69d8c8f7-7w2gb" podUID="227b48c0-2e23-4048-8fb5-21628bd9e5e0" containerName="neutron-httpd" containerID="cri-o://0ce63635905b4359223cc707716af9867aeeb87e2e260750761f5c1bca381777" gracePeriod=30 Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.390484 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="c6909118-b0ce-402c-8bb4-7ce665250739" containerName="ovsdbserver-nb" containerID="cri-o://839ae5ea9fbfa6a3aa2a1bb5b86ebdc2961253bc0eb7f9f7fe77393230b78a2e" gracePeriod=300 Jan 05 22:15:21 crc kubenswrapper[4910]: E0105 22:15:21.392686 4910 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-cfp97" message=< Jan 05 22:15:21 crc kubenswrapper[4910]: Exiting ovn-controller (1) [ OK ] Jan 05 22:15:21 crc kubenswrapper[4910]: > Jan 05 22:15:21 crc kubenswrapper[4910]: E0105 22:15:21.392744 4910 kuberuntime_container.go:691] "PreStop hook failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " pod="openstack/ovn-controller-cfp97" podUID="9253fb1e-9dce-4e54-80ee-fba5e3152596" containerName="ovn-controller" containerID="cri-o://367036c1944402d903c48f5737322433b3dce3b8986e5bf1249815eb02e56af6" Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.392807 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-cfp97" podUID="9253fb1e-9dce-4e54-80ee-fba5e3152596" containerName="ovn-controller" containerID="cri-o://367036c1944402d903c48f5737322433b3dce3b8986e5bf1249815eb02e56af6" gracePeriod=30 Jan 05 22:15:21 crc 
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.418445 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-mdgvq"]
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.440738 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"]
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.441229 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-server" containerID="cri-o://f2769fedd4f026dd164121600d29619d9faa807462e36ef2df370d00a00de88f" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.441640 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="swift-recon-cron" containerID="cri-o://9b00f43f9dea3110ed5f648eaaad264722a104ce4177678f5dfd3b49816ef94f" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.441692 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="rsync" containerID="cri-o://16a4b970b359fbc6fc563656363aa95e36f86df606367a79bbe2212753463870" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.441748 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-expirer" containerID="cri-o://831fd24da04e59f5338c337a32590b1382ff92df1e292249c377b504749c88e0" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.441779 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-updater" containerID="cri-o://003b527dd2c8b643268b3cff916e9f4b6fbe1f8126957b42aa16a3434a320025" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.441809 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-auditor" containerID="cri-o://5739ed0c5ca6cc3d18fd000f35d28e3755ecb57c1feed925bbcdda9a4d46f763" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.441859 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-replicator" containerID="cri-o://5fa5b197746f4fc6c232971216c9a644e9ab975e961e0c935229cf38a4e633b6" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.441894 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-server" containerID="cri-o://1614de421c052452069aee80467540af7a4813e1f57aea4bdd99541595f16624" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.441926 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-updater" containerID="cri-o://187beb52e9b46c05114bf6a7d8a6f124abb0c3eca374625c1e25c808968452b8" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.441955 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-auditor" containerID="cri-o://030ff0b9ff130c75ae6701e006d9210557f59ef3c03a7bb98a7ca430e97109c9" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.441987 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-replicator" containerID="cri-o://68e3c9997f2af7b46038842848451d18323b773c525c082d57ad2f0fb30df5ed" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.442020 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-server" containerID="cri-o://ab943cdf655ff0be681e72dca8f34c8ac3fb8d5e0e2a1b8ed872d453cb2ea0d6" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.442051 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-reaper" containerID="cri-o://2129598e625a213cd3ba79ba7fbea1e290f821367d76513b2babda344dd0d56d" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.442099 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-replicator" containerID="cri-o://0cd732b1f2842a6991bdbdcfda901598d9442e11547c32597338a8fa53a2b375" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.442244 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-auditor" containerID="cri-o://e2ab8a8678a38130f2659e63954e48baae4462647b6604c3ae9b246a148b5a0e" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.463020 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-5dg7h"]
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.474216 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-7srlg"]
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.494666 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.495888 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="8f43d30e-14e4-4978-bb02-a251305f9330" containerName="glance-log" containerID="cri-o://108372c447325380382fbbd2e70aa8ef323e8b23d29f6e32887e63496ac39324" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.496679 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="8f43d30e-14e4-4978-bb02-a251305f9330" containerName="glance-httpd" containerID="cri-o://e302bde0bc25b21936e7ca65ca2849db5acaa0ddf0792ac1f5ffccee28c53746" gracePeriod=30
Jan 05 22:15:21 crc kubenswrapper[4910]: I0105 22:15:21.551327 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-7srlg"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.572348 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-5dg7h"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.580372 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b0f9-account-create-update-f5pmg"
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:21.620922 4910 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 05 22:15:22 crc kubenswrapper[4910]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 05 22:15:22 crc kubenswrapper[4910]:
Jan 05 22:15:22 crc kubenswrapper[4910]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 05 22:15:22 crc kubenswrapper[4910]:
Jan 05 22:15:22 crc kubenswrapper[4910]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 05 22:15:22 crc kubenswrapper[4910]:
Jan 05 22:15:22 crc kubenswrapper[4910]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 05 22:15:22 crc kubenswrapper[4910]:
Jan 05 22:15:22 crc kubenswrapper[4910]: if [ -n "" ]; then
Jan 05 22:15:22 crc kubenswrapper[4910]: GRANT_DATABASE=""
Jan 05 22:15:22 crc kubenswrapper[4910]: else
Jan 05 22:15:22 crc kubenswrapper[4910]: GRANT_DATABASE="*"
Jan 05 22:15:22 crc kubenswrapper[4910]: fi
Jan 05 22:15:22 crc kubenswrapper[4910]:
Jan 05 22:15:22 crc kubenswrapper[4910]: # going for maximum compatibility here:
Jan 05 22:15:22 crc kubenswrapper[4910]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 05 22:15:22 crc kubenswrapper[4910]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 05 22:15:22 crc kubenswrapper[4910]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 05 22:15:22 crc kubenswrapper[4910]: # support updates
Jan 05 22:15:22 crc kubenswrapper[4910]:
Jan 05 22:15:22 crc kubenswrapper[4910]: $MYSQL_CMD < logger="UnhandledError"
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:21.622013 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-lnk9j" podUID="f509687a-bb68-4247-b4de-0f0cb99ca389"
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.634655 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-0eae-account-create-update-pldpc"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.646635 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-0eae-account-create-update-pldpc"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.658160 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-7687b85c5d-l8k6w"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.658543 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-7687b85c5d-l8k6w" podUID="b29bf6bd-079e-4e8b-bec6-49d4923676af" containerName="placement-log" containerID="cri-o://5f896af4ce5feef15b4dba2b2abb97a685fc637f2ec21e921db5a1f857688437" gracePeriod=30
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.659077 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-7687b85c5d-l8k6w" podUID="b29bf6bd-079e-4e8b-bec6-49d4923676af" containerName="placement-api" containerID="cri-o://008ff3c44ce49caf6caea7aa9f55cfc608a8d5e702630f035b8953f4de51ddc1" gracePeriod=30
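The account-create command dump above is cut off at the heredoc ($MYSQL_CMD <), but its surviving comments state the strategy: CREATE USER first, since MySQL 8 no longer creates users implicitly via GRANT and lacks MariaDB's CREATE OR REPLACE, then apply password and TLS settings via ALTER USER so re-runs update in place. A sketch of SQL following that recipe in the same heredoc style; the user name, host, and grant scope are placeholders, not recovered from the truncated script:

# Sketch only: 'nova_api'@'%' and the grant scope are assumed placeholders.
MYSQL_CMD="mysql -h db-host -u root -P 3306"
$MYSQL_CMD <<EOF
CREATE USER IF NOT EXISTS 'nova_api'@'%';
ALTER USER 'nova_api'@'%' IDENTIFIED BY '${DatabasePassword}';
GRANT ALL PRIVILEGES ON *.* TO 'nova_api'@'%';
EOF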
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.687697 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.688246 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="70100901-0709-4900-ac75-462a85b350c3" containerName="glance-log" containerID="cri-o://78b733a8056419d98b27c49e64b19c3144941beb236873f5de3f41a43f0fe70b" gracePeriod=30
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.688472 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="70100901-0709-4900-ac75-462a85b350c3" containerName="glance-httpd" containerID="cri-o://1e10056784aaab7edb53371b1e8ee1b1dfc4d02346c220a12403d46024abfaa4" gracePeriod=30
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.767006 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-x9vtx"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.775045 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovs-vswitchd" containerID="cri-o://9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" gracePeriod=29
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:21.788731 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ae292dd58468a3ca3fe41cf2714cbea7c847466d8e283f8fbe34e48f2fc358f9 is running failed: container process not found" containerID="ae292dd58468a3ca3fe41cf2714cbea7c847466d8e283f8fbe34e48f2fc358f9" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:21.791762 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ae292dd58468a3ca3fe41cf2714cbea7c847466d8e283f8fbe34e48f2fc358f9 is running failed: container process not found" containerID="ae292dd58468a3ca3fe41cf2714cbea7c847466d8e283f8fbe34e48f2fc358f9" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.795328 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-x9vtx"]
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:21.804516 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ae292dd58468a3ca3fe41cf2714cbea7c847466d8e283f8fbe34e48f2fc358f9 is running failed: container process not found" containerID="ae292dd58468a3ca3fe41cf2714cbea7c847466d8e283f8fbe34e48f2fc358f9" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:21.804575 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ae292dd58468a3ca3fe41cf2714cbea7c847466d8e283f8fbe34e48f2fc358f9 is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-sb-0" podUID="206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" containerName="ovsdbserver-sb"
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.830646 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-2c36-account-create-update-777vv"]
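The repeated "ExecSync cmd from runtime service failed ... container is not created or running" errors above are readiness probes racing the shutdown: the probe command is visible in the log (/usr/bin/pidof ovsdb-server), and once the container's process is gone the exec cannot be scheduled at all, so the probe errors instead of merely failing. The same check can be issued by hand through the CRI CLI while a container is still up (assumes crictl is configured for the node's CRI-O socket; the container ID is abbreviated from the log):

# Exit status 0 means the process exists, i.e. the probe would pass.
crictl exec ae292dd58468a /usr/bin/pidof ovsdb-server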
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.845451 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-pd68r"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.845699 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-867cd545c7-pd68r" podUID="f55a0cf4-44d3-4896-911b-430d13f1f67e" containerName="dnsmasq-dns" containerID="cri-o://5d88d5d67f2af076e38a459d8f23e6f3dfd6d4cf06b6347db6a041118cb2daba" gracePeriod=10
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.895842 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-ckwvh"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.906780 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-ckwvh"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.932429 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.940642 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-8d3f-account-create-update-8rkt2"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.943954 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtrfx\" (UniqueName: \"kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk"
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:21.944206 4910 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:21.944719 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data podName:7e2a3efd-2de7-493e-af91-900b224e5313 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:23.944697866 +0000 UTC m=+1455.522195536 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data") pod "rabbitmq-server-0" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313") : configmap "rabbitmq-config-data" not found
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:21.944801 4910 secret.go:188] Couldn't get secret openstack/barbican-config-data: secret "barbican-config-data" not found
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:21.944832 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data podName:3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:23.944823449 +0000 UTC m=+1455.522321119 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data") pod "barbican-api-6b8d97d96d-jbcrk" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69") : secret "barbican-config-data" not found
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:21.947247 4910 projected.go:194] Error preparing data for projected volume kube-api-access-jtrfx for pod openstack/barbican-api-6b8d97d96d-jbcrk: failed to fetch token: serviceaccounts "barbican-barbican" not found
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:21.947313 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx podName:3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:23.947294311 +0000 UTC m=+1455.524791981 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-jtrfx" (UniqueName: "kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx") pod "barbican-api-6b8d97d96d-jbcrk" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69") : failed to fetch token: serviceaccounts "barbican-barbican" not found
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.959569 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-8d3f-account-create-update-8rkt2"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:21.986191 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.005660 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.005894 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerName="nova-metadata-log" containerID="cri-o://a8b2b4d5b559dae71be16c91b6e3ceb8f53c013e2ed93dca2aa9f32d74982c10" gracePeriod=30
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.006314 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerName="nova-metadata-metadata" containerID="cri-o://9f4f5a94d78ccf55b8b88bf158362b3d9f7fee1d51111812e72271a6887b1360" gracePeriod=30
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.031185 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-sqdcz_266ffadc-b889-4089-9779-c64623269d42/openstack-network-exporter/0.log"
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.031318 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-sqdcz"
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.050211 4910 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.050271 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data podName:b9cedfb5-8c45-434f-b04d-694bf6d600b8 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:22.550255176 +0000 UTC m=+1454.127752846 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data") pod "rabbitmq-cell1-server-0" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8") : configmap "rabbitmq-cell1-config-data" not found
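Each MountVolume.SetUp failure above names the exact missing object (configmap rabbitmq-config-data, secret barbican-config-data, serviceaccount barbican-barbican) and schedules a retry with a doubling backoff, visible as durationBeforeRetry 500ms here and 2s in the earlier entries. During a deliberate teardown this is expected noise; if the pods were supposed to start, the named objects can be checked directly:

# Verify the objects the failed mounts depend on.
kubectl get configmap rabbitmq-config-data -n openstack
kubectl get secret barbican-config-data -n openstack
kubectl get serviceaccount barbican-barbican -n openstack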
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.055505 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f817e58c-a8aa-4f0d-8486-153659100a11/ovn-northd/0.log"
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.055575 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.055920 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-78b74ccb54-wvrcf"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.064511 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" podUID="cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" containerName="barbican-keystone-listener-log" containerID="cri-o://71aa4a23693de64aaa8cbbd15881cbffefc0342e211e96812e957ba634cedcdb" gracePeriod=30
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.065136 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" podUID="cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" containerName="barbican-keystone-listener" containerID="cri-o://66adaa6dc30ca0eb6df8fdbc29cb135171d3e16efce93331526042109780467b" gracePeriod=30
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.078230 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-787f96fcd6-44r4b"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.089696 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-66897dc6c-9tqxs"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.090043 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-66897dc6c-9tqxs" podUID="ce8ea9ec-e799-457a-aaca-e16b591bdf0c" containerName="barbican-worker-log" containerID="cri-o://b200e9f40ae5b0a34ae3718175edc6e00f0e7819999c5ddcf7777af1ffb93d24" gracePeriod=30
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.090075 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-66897dc6c-9tqxs" podUID="ce8ea9ec-e799-457a-aaca-e16b591bdf0c" containerName="barbican-worker" containerID="cri-o://dd977da3f8e7fc9fff03a9de2e1898d7cae116843deeda14da1e479c7ce300a4" gracePeriod=30
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.100976 4910 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=<
Jan 05 22:15:22 crc kubenswrapper[4910]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 05 22:15:22 crc kubenswrapper[4910]: + source /usr/local/bin/container-scripts/functions
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNBridge=br-int
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNRemote=tcp:localhost:6642
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNEncapType=geneve
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNAvailabilityZones=
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ EnableChassisAsGateway=true
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ PhysicalNetworks=
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNHostName=
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ ovs_dir=/var/lib/openvswitch
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 05 22:15:22 crc kubenswrapper[4910]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 05 22:15:22 crc kubenswrapper[4910]: + sleep 0.5
Jan 05 22:15:22 crc kubenswrapper[4910]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 05 22:15:22 crc kubenswrapper[4910]: + sleep 0.5
Jan 05 22:15:22 crc kubenswrapper[4910]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 05 22:15:22 crc kubenswrapper[4910]: + cleanup_ovsdb_server_semaphore
Jan 05 22:15:22 crc kubenswrapper[4910]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 05 22:15:22 crc kubenswrapper[4910]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 05 22:15:22 crc kubenswrapper[4910]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-9g2kt" message=<
Jan 05 22:15:22 crc kubenswrapper[4910]: Exiting ovsdb-server (5) [ OK ]
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 05 22:15:22 crc kubenswrapper[4910]: + source /usr/local/bin/container-scripts/functions
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNBridge=br-int
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNRemote=tcp:localhost:6642
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNEncapType=geneve
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNAvailabilityZones=
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ EnableChassisAsGateway=true
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ PhysicalNetworks=
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNHostName=
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ ovs_dir=/var/lib/openvswitch
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 05 22:15:22 crc kubenswrapper[4910]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 05 22:15:22 crc kubenswrapper[4910]: + sleep 0.5
Jan 05 22:15:22 crc kubenswrapper[4910]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 05 22:15:22 crc kubenswrapper[4910]: + sleep 0.5
Jan 05 22:15:22 crc kubenswrapper[4910]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 05 22:15:22 crc kubenswrapper[4910]: + cleanup_ovsdb_server_semaphore
Jan 05 22:15:22 crc kubenswrapper[4910]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 05 22:15:22 crc kubenswrapper[4910]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 05 22:15:22 crc kubenswrapper[4910]: >
Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.101013 4910 kuberuntime_container.go:691] "PreStop hook failed" err=<
Jan 05 22:15:22 crc kubenswrapper[4910]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 05 22:15:22 crc kubenswrapper[4910]: + source /usr/local/bin/container-scripts/functions
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNBridge=br-int
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNRemote=tcp:localhost:6642
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNEncapType=geneve
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNAvailabilityZones=
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ EnableChassisAsGateway=true
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ PhysicalNetworks=
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ OVNHostName=
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ ovs_dir=/var/lib/openvswitch
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 05 22:15:22 crc kubenswrapper[4910]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 05 22:15:22 crc kubenswrapper[4910]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 05 22:15:22 crc kubenswrapper[4910]: + sleep 0.5
Jan 05 22:15:22 crc kubenswrapper[4910]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 05 22:15:22 crc kubenswrapper[4910]: + sleep 0.5
Jan 05 22:15:22 crc kubenswrapper[4910]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 05 22:15:22 crc kubenswrapper[4910]: + cleanup_ovsdb_server_semaphore
Jan 05 22:15:22 crc kubenswrapper[4910]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 05 22:15:22 crc kubenswrapper[4910]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 05 22:15:22 crc kubenswrapper[4910]: > pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovsdb-server" containerID="cri-o://1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338"
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.101045 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovsdb-server" containerID="cri-o://1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" gracePeriod=29
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.101276 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-d6c5d94b9-llc4f"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.113922 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6bbbdf8dc6-s6tmf"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.114719 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" podUID="45acd92f-2e5d-4fc1-8b91-c91f165e786a" containerName="barbican-api" containerID="cri-o://a005751f16bf05306ffd138b7900c870797084700111340ccf797cab547f6f2e" gracePeriod=30
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.114883 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" podUID="45acd92f-2e5d-4fc1-8b91-c91f165e786a" containerName="barbican-api-log" containerID="cri-o://6104667b5ae1cdcd47a597709123b12716141db09f9b433cb838f5a9fceaa70c" gracePeriod=30
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.140578 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-bjzk5"]
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.151165 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/266ffadc-b889-4089-9779-c64623269d42-ovn-rundir\") pod \"266ffadc-b889-4089-9779-c64623269d42\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") "
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.151331 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/266ffadc-b889-4089-9779-c64623269d42-ovs-rundir\") pod \"266ffadc-b889-4089-9779-c64623269d42\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") "
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.151364 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-ovn-northd-tls-certs\") pod \"f817e58c-a8aa-4f0d-8486-153659100a11\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") "
Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.151426 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-combined-ca-bundle\") pod \"f817e58c-a8aa-4f0d-8486-153659100a11\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") "
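The preStop trace above is bash xtrace output (the + prefixes), and it shows the stop script's whole logic even though the script file itself is not in the log: poll for a semaphore file that marks it safe to stop, then remove the semaphore and stop ovsdb-server while leaving ovs-vswitchd running. A reconstruction assembled from the traced commands; the sourced functions file and its variables are taken on faith from the trace:

#!/bin/bash
# Reconstructed from the xtrace above, not from the original script file.
source /usr/local/bin/container-scripts/functions
SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
while [ ! -f "$SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE" ]; do
    sleep 0.5   # wait for another container to signal it is safe to stop
done
rm -f "$SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE"   # cleanup_ovsdb_server_semaphore
/usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd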
\"f817e58c-a8aa-4f0d-8486-153659100a11\") " Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.151487 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-metrics-certs-tls-certs\") pod \"f817e58c-a8aa-4f0d-8486-153659100a11\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.151559 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/266ffadc-b889-4089-9779-c64623269d42-config\") pod \"266ffadc-b889-4089-9779-c64623269d42\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.151588 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/266ffadc-b889-4089-9779-c64623269d42-metrics-certs-tls-certs\") pod \"266ffadc-b889-4089-9779-c64623269d42\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.151625 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f817e58c-a8aa-4f0d-8486-153659100a11-config\") pod \"f817e58c-a8aa-4f0d-8486-153659100a11\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.151736 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/266ffadc-b889-4089-9779-c64623269d42-combined-ca-bundle\") pod \"266ffadc-b889-4089-9779-c64623269d42\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.151771 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79vl9\" (UniqueName: \"kubernetes.io/projected/266ffadc-b889-4089-9779-c64623269d42-kube-api-access-79vl9\") pod \"266ffadc-b889-4089-9779-c64623269d42\" (UID: \"266ffadc-b889-4089-9779-c64623269d42\") " Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.151818 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljtbt\" (UniqueName: \"kubernetes.io/projected/f817e58c-a8aa-4f0d-8486-153659100a11-kube-api-access-ljtbt\") pod \"f817e58c-a8aa-4f0d-8486-153659100a11\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.151859 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f817e58c-a8aa-4f0d-8486-153659100a11-ovn-rundir\") pod \"f817e58c-a8aa-4f0d-8486-153659100a11\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.151919 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f817e58c-a8aa-4f0d-8486-153659100a11-scripts\") pod \"f817e58c-a8aa-4f0d-8486-153659100a11\" (UID: \"f817e58c-a8aa-4f0d-8486-153659100a11\") " Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.155160 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/266ffadc-b889-4089-9779-c64623269d42-config" (OuterVolumeSpecName: "config") pod "266ffadc-b889-4089-9779-c64623269d42" (UID: 
"266ffadc-b889-4089-9779-c64623269d42"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.155219 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/266ffadc-b889-4089-9779-c64623269d42-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "266ffadc-b889-4089-9779-c64623269d42" (UID: "266ffadc-b889-4089-9779-c64623269d42"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.155242 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/266ffadc-b889-4089-9779-c64623269d42-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "266ffadc-b889-4089-9779-c64623269d42" (UID: "266ffadc-b889-4089-9779-c64623269d42"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.156228 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f817e58c-a8aa-4f0d-8486-153659100a11-scripts" (OuterVolumeSpecName: "scripts") pod "f817e58c-a8aa-4f0d-8486-153659100a11" (UID: "f817e58c-a8aa-4f0d-8486-153659100a11"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.156435 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f817e58c-a8aa-4f0d-8486-153659100a11-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "f817e58c-a8aa-4f0d-8486-153659100a11" (UID: "f817e58c-a8aa-4f0d-8486-153659100a11"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.156835 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f817e58c-a8aa-4f0d-8486-153659100a11-config" (OuterVolumeSpecName: "config") pod "f817e58c-a8aa-4f0d-8486-153659100a11" (UID: "f817e58c-a8aa-4f0d-8486-153659100a11"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.162000 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-bjzk5"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.166660 4910 generic.go:334] "Generic (PLEG): container finished" podID="8f43d30e-14e4-4978-bb02-a251305f9330" containerID="108372c447325380382fbbd2e70aa8ef323e8b23d29f6e32887e63496ac39324" exitCode=143 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.166790 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f43d30e-14e4-4978-bb02-a251305f9330","Type":"ContainerDied","Data":"108372c447325380382fbbd2e70aa8ef323e8b23d29f6e32887e63496ac39324"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.174757 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/266ffadc-b889-4089-9779-c64623269d42-kube-api-access-79vl9" (OuterVolumeSpecName: "kube-api-access-79vl9") pod "266ffadc-b889-4089-9779-c64623269d42" (UID: "266ffadc-b889-4089-9779-c64623269d42"). InnerVolumeSpecName "kube-api-access-79vl9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.175249 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f817e58c-a8aa-4f0d-8486-153659100a11-kube-api-access-ljtbt" (OuterVolumeSpecName: "kube-api-access-ljtbt") pod "f817e58c-a8aa-4f0d-8486-153659100a11" (UID: "f817e58c-a8aa-4f0d-8486-153659100a11"). InnerVolumeSpecName "kube-api-access-ljtbt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.187137 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_206d2077-4a66-4c6d-aa55-6bf0e0f88c2c/ovsdbserver-sb/0.log" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.187236 4910 generic.go:334] "Generic (PLEG): container finished" podID="206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" containerID="5e44050096e20cb6a25794fd8f53d149477e83b2422b09ae2b60db07b3bfa76e" exitCode=2 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.187256 4910 generic.go:334] "Generic (PLEG): container finished" podID="206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" containerID="ae292dd58468a3ca3fe41cf2714cbea7c847466d8e283f8fbe34e48f2fc358f9" exitCode=143 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.187331 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c","Type":"ContainerDied","Data":"5e44050096e20cb6a25794fd8f53d149477e83b2422b09ae2b60db07b3bfa76e"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.187366 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c","Type":"ContainerDied","Data":"ae292dd58468a3ca3fe41cf2714cbea7c847466d8e283f8fbe34e48f2fc358f9"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.201006 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6b8d97d96d-jbcrk"] Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.201855 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-jtrfx], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/barbican-api-6b8d97d96d-jbcrk" podUID="3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.216086 4910 generic.go:334] "Generic (PLEG): container finished" podID="f55a0cf4-44d3-4896-911b-430d13f1f67e" containerID="5d88d5d67f2af076e38a459d8f23e6f3dfd6d4cf06b6347db6a041118cb2daba" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.216206 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-867cd545c7-pd68r" event={"ID":"f55a0cf4-44d3-4896-911b-430d13f1f67e","Type":"ContainerDied","Data":"5d88d5d67f2af076e38a459d8f23e6f3dfd6d4cf06b6347db6a041118cb2daba"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.221659 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f817e58c-a8aa-4f0d-8486-153659100a11" (UID: "f817e58c-a8aa-4f0d-8486-153659100a11"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.223213 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.223497 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cf7e2b20-58e5-4c61-9e50-c1af51acf521" containerName="nova-api-log" containerID="cri-o://b5b23f1d39fd87015c972670711fa8663521d44165624ef12201ef9f0c36a505" gracePeriod=30 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.223653 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cf7e2b20-58e5-4c61-9e50-c1af51acf521" containerName="nova-api-api" containerID="cri-o://d7aecd1f8fe9c5ffa799f574329b9dee47f4fd0d6129def71457d2e4db819834" gracePeriod=30 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.235008 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-mpds2"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.241306 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/266ffadc-b889-4089-9779-c64623269d42-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "266ffadc-b889-4089-9779-c64623269d42" (UID: "266ffadc-b889-4089-9779-c64623269d42"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.247562 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-fpldq"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.255293 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f817e58c-a8aa-4f0d-8486-153659100a11-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.255316 4910 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/266ffadc-b889-4089-9779-c64623269d42-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.255325 4910 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/266ffadc-b889-4089-9779-c64623269d42-ovs-rundir\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.255334 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.255346 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/266ffadc-b889-4089-9779-c64623269d42-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.255356 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f817e58c-a8aa-4f0d-8486-153659100a11-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.255364 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/266ffadc-b889-4089-9779-c64623269d42-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.255376 4910 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79vl9\" (UniqueName: \"kubernetes.io/projected/266ffadc-b889-4089-9779-c64623269d42-kube-api-access-79vl9\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.255543 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljtbt\" (UniqueName: \"kubernetes.io/projected/f817e58c-a8aa-4f0d-8486-153659100a11-kube-api-access-ljtbt\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.255712 4910 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f817e58c-a8aa-4f0d-8486-153659100a11-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259294 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="16a4b970b359fbc6fc563656363aa95e36f86df606367a79bbe2212753463870" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259330 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="831fd24da04e59f5338c337a32590b1382ff92df1e292249c377b504749c88e0" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259340 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="003b527dd2c8b643268b3cff916e9f4b6fbe1f8126957b42aa16a3434a320025" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259349 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="5739ed0c5ca6cc3d18fd000f35d28e3755ecb57c1feed925bbcdda9a4d46f763" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259358 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="5fa5b197746f4fc6c232971216c9a644e9ab975e961e0c935229cf38a4e633b6" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259367 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="1614de421c052452069aee80467540af7a4813e1f57aea4bdd99541595f16624" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259375 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="187beb52e9b46c05114bf6a7d8a6f124abb0c3eca374625c1e25c808968452b8" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259383 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="030ff0b9ff130c75ae6701e006d9210557f59ef3c03a7bb98a7ca430e97109c9" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259390 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="68e3c9997f2af7b46038842848451d18323b773c525c082d57ad2f0fb30df5ed" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259400 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="ab943cdf655ff0be681e72dca8f34c8ac3fb8d5e0e2a1b8ed872d453cb2ea0d6" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259407 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" 
containerID="2129598e625a213cd3ba79ba7fbea1e290f821367d76513b2babda344dd0d56d" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259415 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="e2ab8a8678a38130f2659e63954e48baae4462647b6604c3ae9b246a148b5a0e" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259424 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="0cd732b1f2842a6991bdbdcfda901598d9442e11547c32597338a8fa53a2b375" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259497 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"16a4b970b359fbc6fc563656363aa95e36f86df606367a79bbe2212753463870"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259531 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"831fd24da04e59f5338c337a32590b1382ff92df1e292249c377b504749c88e0"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259544 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"003b527dd2c8b643268b3cff916e9f4b6fbe1f8126957b42aa16a3434a320025"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259556 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"5739ed0c5ca6cc3d18fd000f35d28e3755ecb57c1feed925bbcdda9a4d46f763"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259567 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"5fa5b197746f4fc6c232971216c9a644e9ab975e961e0c935229cf38a4e633b6"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259577 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"1614de421c052452069aee80467540af7a4813e1f57aea4bdd99541595f16624"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259586 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"187beb52e9b46c05114bf6a7d8a6f124abb0c3eca374625c1e25c808968452b8"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259595 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"030ff0b9ff130c75ae6701e006d9210557f59ef3c03a7bb98a7ca430e97109c9"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259606 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"68e3c9997f2af7b46038842848451d18323b773c525c082d57ad2f0fb30df5ed"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259615 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"ab943cdf655ff0be681e72dca8f34c8ac3fb8d5e0e2a1b8ed872d453cb2ea0d6"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259626 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"2129598e625a213cd3ba79ba7fbea1e290f821367d76513b2babda344dd0d56d"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259636 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"e2ab8a8678a38130f2659e63954e48baae4462647b6604c3ae9b246a148b5a0e"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.259647 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"0cd732b1f2842a6991bdbdcfda901598d9442e11547c32597338a8fa53a2b375"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.262096 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-mpds2"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.263443 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-lnk9j" event={"ID":"f509687a-bb68-4247-b4de-0f0cb99ca389","Type":"ContainerStarted","Data":"362337c9ef5e7da45f6ff21f34ff3984bb831ae81a7ed1bec943f6f7b14571ec"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.265008 4910 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/root-account-create-update-lnk9j" secret="" err="secret \"galera-openstack-cell1-dockercfg-6b5kw\" not found" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.266365 4910 generic.go:334] "Generic (PLEG): container finished" podID="9253fb1e-9dce-4e54-80ee-fba5e3152596" containerID="367036c1944402d903c48f5737322433b3dce3b8986e5bf1249815eb02e56af6" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.266481 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cfp97" event={"ID":"9253fb1e-9dce-4e54-80ee-fba5e3152596","Type":"ContainerDied","Data":"367036c1944402d903c48f5737322433b3dce3b8986e5bf1249815eb02e56af6"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.281333 4910 generic.go:334] "Generic (PLEG): container finished" podID="227b48c0-2e23-4048-8fb5-21628bd9e5e0" containerID="0ce63635905b4359223cc707716af9867aeeb87e2e260750761f5c1bca381777" exitCode=0 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.281426 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c69d8c8f7-7w2gb" event={"ID":"227b48c0-2e23-4048-8fb5-21628bd9e5e0","Type":"ContainerDied","Data":"0ce63635905b4359223cc707716af9867aeeb87e2e260750761f5c1bca381777"} Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.303954 4910 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 05 22:15:22 crc kubenswrapper[4910]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 05 22:15:22 crc kubenswrapper[4910]: Jan 05 22:15:22 crc kubenswrapper[4910]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 05 22:15:22 crc kubenswrapper[4910]: Jan 05 22:15:22 crc kubenswrapper[4910]: 
export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 05 22:15:22 crc kubenswrapper[4910]: Jan 05 22:15:22 crc kubenswrapper[4910]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 05 22:15:22 crc kubenswrapper[4910]: Jan 05 22:15:22 crc kubenswrapper[4910]: if [ -n "" ]; then Jan 05 22:15:22 crc kubenswrapper[4910]: GRANT_DATABASE="" Jan 05 22:15:22 crc kubenswrapper[4910]: else Jan 05 22:15:22 crc kubenswrapper[4910]: GRANT_DATABASE="*" Jan 05 22:15:22 crc kubenswrapper[4910]: fi Jan 05 22:15:22 crc kubenswrapper[4910]: Jan 05 22:15:22 crc kubenswrapper[4910]: # going for maximum compatibility here: Jan 05 22:15:22 crc kubenswrapper[4910]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 05 22:15:22 crc kubenswrapper[4910]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 05 22:15:22 crc kubenswrapper[4910]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 05 22:15:22 crc kubenswrapper[4910]: # support updates Jan 05 22:15:22 crc kubenswrapper[4910]: Jan 05 22:15:22 crc kubenswrapper[4910]: $MYSQL_CMD < logger="UnhandledError" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.304392 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-fpldq"] Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.305658 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-lnk9j" podUID="f509687a-bb68-4247-b4de-0f0cb99ca389" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.326553 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.326772 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="da2a33ae-86a0-465d-a05e-89007e39e580" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://9508ef451ff0f7e73dc0cfea8eda8b03067704bfee4c29361c6f466617631e69" gracePeriod=30 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.336866 4910 generic.go:334] "Generic (PLEG): container finished" podID="07efd759-c536-425d-938e-a8ccd41706cd" containerID="6daa2eb7900c845da95b4889f00144bf520b49eeafeeefc6d62129f8760b3df1" exitCode=143 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.336950 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"07efd759-c536-425d-938e-a8ccd41706cd","Type":"ContainerDied","Data":"6daa2eb7900c845da95b4889f00144bf520b49eeafeeefc6d62129f8760b3df1"} Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.359663 4910 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.359733 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts podName:f509687a-bb68-4247-b4de-0f0cb99ca389 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:22.859719967 +0000 UTC m=+1454.437217627 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts") pod "root-account-create-update-lnk9j" (UID: "f509687a-bb68-4247-b4de-0f0cb99ca389") : configmap "openstack-cell1-scripts" not found Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.362154 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-1a42-account-create-update-4vz6m"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.382170 4910 generic.go:334] "Generic (PLEG): container finished" podID="70100901-0709-4900-ac75-462a85b350c3" containerID="78b733a8056419d98b27c49e64b19c3144941beb236873f5de3f41a43f0fe70b" exitCode=143 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.382236 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"70100901-0709-4900-ac75-462a85b350c3","Type":"ContainerDied","Data":"78b733a8056419d98b27c49e64b19c3144941beb236873f5de3f41a43f0fe70b"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.403205 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-5vttc"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.432318 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/266ffadc-b889-4089-9779-c64623269d42-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "266ffadc-b889-4089-9779-c64623269d42" (UID: "266ffadc-b889-4089-9779-c64623269d42"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.442750 4910 generic.go:334] "Generic (PLEG): container finished" podID="b29bf6bd-079e-4e8b-bec6-49d4923676af" containerID="5f896af4ce5feef15b4dba2b2abb97a685fc637f2ec21e921db5a1f857688437" exitCode=143 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.442876 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7687b85c5d-l8k6w" event={"ID":"b29bf6bd-079e-4e8b-bec6-49d4923676af","Type":"ContainerDied","Data":"5f896af4ce5feef15b4dba2b2abb97a685fc637f2ec21e921db5a1f857688437"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.453414 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-5vttc"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.458061 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-b0f9-account-create-update-f5pmg"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.461818 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-sqdcz_266ffadc-b889-4089-9779-c64623269d42/openstack-network-exporter/0.log" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.461905 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-sqdcz" event={"ID":"266ffadc-b889-4089-9779-c64623269d42","Type":"ContainerDied","Data":"0abeec70948d2691606e88cd47569b900e6929c45325e7ee02d3961b959ea6a4"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.461939 4910 scope.go:117] "RemoveContainer" containerID="0aed0283be1d2d7717625b9ca57d441f05965d2b141a4e5d7c184eead1f9c999" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.462071 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-sqdcz" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.463849 4910 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/266ffadc-b889-4089-9779-c64623269d42-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.484685 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.507302 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-lnk9j"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.520495 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c6909118-b0ce-402c-8bb4-7ce665250739/ovsdbserver-nb/0.log" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.520547 4910 generic.go:334] "Generic (PLEG): container finished" podID="c6909118-b0ce-402c-8bb4-7ce665250739" containerID="59360022044ca27d9ee5757033d8f3c5a80aee6675d87823287602295063b5ec" exitCode=2 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.520565 4910 generic.go:334] "Generic (PLEG): container finished" podID="c6909118-b0ce-402c-8bb4-7ce665250739" containerID="839ae5ea9fbfa6a3aa2a1bb5b86ebdc2961253bc0eb7f9f7fe77393230b78a2e" exitCode=143 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.520654 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c6909118-b0ce-402c-8bb4-7ce665250739","Type":"ContainerDied","Data":"59360022044ca27d9ee5757033d8f3c5a80aee6675d87823287602295063b5ec"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.520683 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c6909118-b0ce-402c-8bb4-7ce665250739","Type":"ContainerDied","Data":"839ae5ea9fbfa6a3aa2a1bb5b86ebdc2961253bc0eb7f9f7fe77393230b78a2e"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.537832 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-lnk9j"] Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.568326 4910 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.568701 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data podName:b9cedfb5-8c45-434f-b04d-694bf6d600b8 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:23.568684663 +0000 UTC m=+1455.146182333 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data") pod "rabbitmq-cell1-server-0" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8") : configmap "rabbitmq-cell1-config-data" not found Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.589613 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f817e58c-a8aa-4f0d-8486-153659100a11/ovn-northd/0.log" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.589660 4910 generic.go:334] "Generic (PLEG): container finished" podID="f817e58c-a8aa-4f0d-8486-153659100a11" containerID="6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627" exitCode=2 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.589679 4910 generic.go:334] "Generic (PLEG): container finished" podID="f817e58c-a8aa-4f0d-8486-153659100a11" containerID="775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf" exitCode=143 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.589714 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f817e58c-a8aa-4f0d-8486-153659100a11","Type":"ContainerDied","Data":"6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.589745 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f817e58c-a8aa-4f0d-8486-153659100a11","Type":"ContainerDied","Data":"775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.589757 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f817e58c-a8aa-4f0d-8486-153659100a11","Type":"ContainerDied","Data":"bfd4ad6c8a0477ecc83191d5bd773c2ac268eebcf23ed43a91863855f05d336b"} Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.589852 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.625142 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "f817e58c-a8aa-4f0d-8486-153659100a11" (UID: "f817e58c-a8aa-4f0d-8486-153659100a11"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.625310 4910 scope.go:117] "RemoveContainer" containerID="6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.631578 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "f817e58c-a8aa-4f0d-8486-153659100a11" (UID: "f817e58c-a8aa-4f0d-8486-153659100a11"). InnerVolumeSpecName "ovn-northd-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.648334 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.648542 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="83319bb4-7278-49b3-8ef2-beb8baa0a1a6" containerName="nova-scheduler-scheduler" containerID="cri-o://03e2a0482d96bb74144b1ebf3502bf0c9e701db7ab42a851ca5abd53fadbfdf7" gracePeriod=30 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.671736 4910 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.671777 4910 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f817e58c-a8aa-4f0d-8486-153659100a11-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.677020 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-gqjqz"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.677775 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="7e2a3efd-2de7-493e-af91-900b224e5313" containerName="rabbitmq" containerID="cri-o://642125ac821bb754a0c42680f8f99f5a13b1a90ac3a61d0a934715684f4248eb" gracePeriod=604800 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.687151 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="f9587597-0dcc-4c3a-b578-f9797dd2f9c1" containerName="galera" containerID="cri-o://e582b0a3f3996c075f0d0a3ac06e81dc222960f93f515619da44899ee0b2bce4" gracePeriod=30 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.702348 4910 scope.go:117] "RemoveContainer" containerID="775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.708642 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.708961 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="70694d65-fa64-4667-b1aa-bac50650687c" containerName="nova-cell1-conductor-conductor" containerID="cri-o://708b16276678b2822ae86c9c52e58e344dbcf830fd5f034e5d7cb53f881b9997" gracePeriod=30 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.718536 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-gqjqz"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.771859 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0284aa1c-869a-43b1-9984-488eaed6ba0b" path="/var/lib/kubelet/pods/0284aa1c-869a-43b1-9984-488eaed6ba0b/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.774743 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2219fd99-b8c4-4918-8aa2-7f59a307dec5" path="/var/lib/kubelet/pods/2219fd99-b8c4-4918-8aa2-7f59a307dec5/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.775464 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="267d700c-88ab-4264-8ee9-cb3b02d10b23" 
path="/var/lib/kubelet/pods/267d700c-88ab-4264-8ee9-cb3b02d10b23/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.776099 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fd0274e-5312-4b6e-be52-03e243ac4e6b" path="/var/lib/kubelet/pods/2fd0274e-5312-4b6e-be52-03e243ac4e6b/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.777383 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4432a67a-7276-4f55-838d-b685529581d5" path="/var/lib/kubelet/pods/4432a67a-7276-4f55-838d-b685529581d5/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.777920 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44f006eb-f848-4351-914e-9a9e751194a3" path="/var/lib/kubelet/pods/44f006eb-f848-4351-914e-9a9e751194a3/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.778474 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46c3eb83-1eeb-4c44-b474-73e50c5afd6e" path="/var/lib/kubelet/pods/46c3eb83-1eeb-4c44-b474-73e50c5afd6e/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.779001 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="569058f0-d9dd-45de-a0ce-dd38bb6ce341" path="/var/lib/kubelet/pods/569058f0-d9dd-45de-a0ce-dd38bb6ce341/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.780238 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="607e486d-f70e-413c-8568-db15e01a3377" path="/var/lib/kubelet/pods/607e486d-f70e-413c-8568-db15e01a3377/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.780724 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d9a1e15-a5b4-46bd-89d0-92c58b63c416" path="/var/lib/kubelet/pods/6d9a1e15-a5b4-46bd-89d0-92c58b63c416/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.781257 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77d19d69-6202-4594-8b11-e02ff86dc8f6" path="/var/lib/kubelet/pods/77d19d69-6202-4594-8b11-e02ff86dc8f6/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.790061 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba" path="/var/lib/kubelet/pods/7dd7aaf9-4f0f-4c8a-ac0e-99d04d6a12ba/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.791473 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8892663e-d012-478a-99ea-8cff1f7c9b35" path="/var/lib/kubelet/pods/8892663e-d012-478a-99ea-8cff1f7c9b35/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.793448 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8a77378-db31-4715-b92e-2edc06b352a5" path="/var/lib/kubelet/pods/a8a77378-db31-4715-b92e-2edc06b352a5/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.796021 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a90f601c-a3b6-496b-9f50-2ecde1cb123b" path="/var/lib/kubelet/pods/a90f601c-a3b6-496b-9f50-2ecde1cb123b/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.796656 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5a1f57c-578a-4396-95b1-e09d6ac92383" path="/var/lib/kubelet/pods/b5a1f57c-578a-4396-95b1-e09d6ac92383/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.797648 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b94e459d-172c-41ca-a38c-384a5f3e323e" 
path="/var/lib/kubelet/pods/b94e459d-172c-41ca-a38c-384a5f3e323e/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.798904 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dda642c8-96ed-4c08-be87-119551bcd735" path="/var/lib/kubelet/pods/dda642c8-96ed-4c08-be87-119551bcd735/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.799642 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e520b140-ba86-4e17-82d2-4e8c4dc15474" path="/var/lib/kubelet/pods/e520b140-ba86-4e17-82d2-4e8c4dc15474/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.801281 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f127b53b-cd48-48bd-b890-2dd47e1abd37" path="/var/lib/kubelet/pods/f127b53b-cd48-48bd-b890-2dd47e1abd37/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.801933 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f72b3b94-d06e-444e-bfdf-d9fbb4d46db2" path="/var/lib/kubelet/pods/f72b3b94-d06e-444e-bfdf-d9fbb4d46db2/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.802875 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8900433-ac66-443f-8d83-72fefd413abd" path="/var/lib/kubelet/pods/f8900433-ac66-443f-8d83-72fefd413abd/volumes" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.805984 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.806016 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.806029 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2r4mt"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.806043 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2r4mt"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.806066 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-sqdcz"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.806077 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-sqdcz"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.806297 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="de8aafdf-9b35-4c41-8726-6c7e86edee5f" containerName="nova-cell0-conductor-conductor" containerID="cri-o://38699171184dfa46b8af02c0e7a8bf314316f1f3e8f7f4d2c59c764a37fae22a" gracePeriod=30 Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.809310 4910 scope.go:117] "RemoveContainer" containerID="6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627" Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.812330 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627\": container with ID starting with 6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627 not found: ID does not exist" containerID="6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.812382 4910 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627"} err="failed to get container status \"6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627\": rpc error: code = NotFound desc = could not find container \"6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627\": container with ID starting with 6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627 not found: ID does not exist" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.812413 4910 scope.go:117] "RemoveContainer" containerID="775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf" Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.817050 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf\": container with ID starting with 775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf not found: ID does not exist" containerID="775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.817095 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf"} err="failed to get container status \"775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf\": rpc error: code = NotFound desc = could not find container \"775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf\": container with ID starting with 775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf not found: ID does not exist" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.817133 4910 scope.go:117] "RemoveContainer" containerID="6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.818201 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627"} err="failed to get container status \"6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627\": rpc error: code = NotFound desc = could not find container \"6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627\": container with ID starting with 6ca0b92febedb3c5a00c06e828a71d381ce355e0b02c17a53a8e077be5c0a627 not found: ID does not exist" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.818228 4910 scope.go:117] "RemoveContainer" containerID="775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.822283 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf"} err="failed to get container status \"775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf\": rpc error: code = NotFound desc = could not find container \"775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf\": container with ID starting with 775a40a61c5552bd695159a6992312acd259814d06d50b9be807a55dfc4a58bf not found: ID does not exist" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.860787 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="b9cedfb5-8c45-434f-b04d-694bf6d600b8" containerName="rabbitmq" 
containerID="cri-o://2c95bc32934ba46ce9701d8eb4e4fdb43de1b82593499f287b4f2c6458380007" gracePeriod=604800 Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.878483 4910 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.878559 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts podName:f509687a-bb68-4247-b4de-0f0cb99ca389 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:23.878543513 +0000 UTC m=+1455.456041183 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts") pod "root-account-create-update-lnk9j" (UID: "f509687a-bb68-4247-b4de-0f0cb99ca389") : configmap "openstack-cell1-scripts" not found Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.917758 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf817e58c_a8aa_4f0d_8486_153659100a11.slice/crio-bfd4ad6c8a0477ecc83191d5bd773c2ac268eebcf23ed43a91863855f05d336b\": RecentStats: unable to find data in memory cache]" Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.922638 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 05 22:15:22 crc kubenswrapper[4910]: I0105 22:15:22.930875 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.936467 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="03e2a0482d96bb74144b1ebf3502bf0c9e701db7ab42a851ca5abd53fadbfdf7" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.937769 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="03e2a0482d96bb74144b1ebf3502bf0c9e701db7ab42a851ca5abd53fadbfdf7" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.940756 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="03e2a0482d96bb74144b1ebf3502bf0c9e701db7ab42a851ca5abd53fadbfdf7" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 22:15:22 crc kubenswrapper[4910]: E0105 22:15:22.940814 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="83319bb4-7278-49b3-8ef2-beb8baa0a1a6" containerName="nova-scheduler-scheduler" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.208060 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-cfp97" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.220800 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-867cd545c7-pd68r" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.253057 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_206d2077-4a66-4c6d-aa55-6bf0e0f88c2c/ovsdbserver-sb/0.log" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.253341 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.272599 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c6909118-b0ce-402c-8bb4-7ce665250739/ovsdbserver-nb/0.log" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.272722 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.294821 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-config\") pod \"f55a0cf4-44d3-4896-911b-430d13f1f67e\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.294896 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2ssq\" (UniqueName: \"kubernetes.io/projected/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-kube-api-access-w2ssq\") pod \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.294948 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-scripts\") pod \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.294983 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-combined-ca-bundle\") pod \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295005 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-ovsdbserver-sb-tls-certs\") pod \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295046 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/9253fb1e-9dce-4e54-80ee-fba5e3152596-ovn-controller-tls-certs\") pod \"9253fb1e-9dce-4e54-80ee-fba5e3152596\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295079 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-log-ovn\") pod \"9253fb1e-9dce-4e54-80ee-fba5e3152596\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295136 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage04-crc\") pod \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295164 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-dns-svc\") pod \"f55a0cf4-44d3-4896-911b-430d13f1f67e\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295207 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-run-ovn\") pod \"9253fb1e-9dce-4e54-80ee-fba5e3152596\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295233 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4m82w\" (UniqueName: \"kubernetes.io/projected/9253fb1e-9dce-4e54-80ee-fba5e3152596-kube-api-access-4m82w\") pod \"9253fb1e-9dce-4e54-80ee-fba5e3152596\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295255 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-ovsdb-rundir\") pod \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295279 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-config\") pod \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295297 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-dns-swift-storage-0\") pod \"f55a0cf4-44d3-4896-911b-430d13f1f67e\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295317 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-ovsdbserver-sb\") pod \"f55a0cf4-44d3-4896-911b-430d13f1f67e\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295346 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwx6h\" (UniqueName: \"kubernetes.io/projected/f55a0cf4-44d3-4896-911b-430d13f1f67e-kube-api-access-mwx6h\") pod \"f55a0cf4-44d3-4896-911b-430d13f1f67e\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295372 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9253fb1e-9dce-4e54-80ee-fba5e3152596-combined-ca-bundle\") pod \"9253fb1e-9dce-4e54-80ee-fba5e3152596\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295394 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-metrics-certs-tls-certs\") pod \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\" (UID: \"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295413 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-run\") pod \"9253fb1e-9dce-4e54-80ee-fba5e3152596\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295472 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9253fb1e-9dce-4e54-80ee-fba5e3152596-scripts\") pod \"9253fb1e-9dce-4e54-80ee-fba5e3152596\" (UID: \"9253fb1e-9dce-4e54-80ee-fba5e3152596\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295491 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-ovsdbserver-nb\") pod \"f55a0cf4-44d3-4896-911b-430d13f1f67e\" (UID: \"f55a0cf4-44d3-4896-911b-430d13f1f67e\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295632 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "9253fb1e-9dce-4e54-80ee-fba5e3152596" (UID: "9253fb1e-9dce-4e54-80ee-fba5e3152596"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.295936 4910 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.296881 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" (UID: "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.298237 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "9253fb1e-9dce-4e54-80ee-fba5e3152596" (UID: "9253fb1e-9dce-4e54-80ee-fba5e3152596"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.301007 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-config" (OuterVolumeSpecName: "config") pod "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" (UID: "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.301447 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-run" (OuterVolumeSpecName: "var-run") pod "9253fb1e-9dce-4e54-80ee-fba5e3152596" (UID: "9253fb1e-9dce-4e54-80ee-fba5e3152596"). 
InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.310310 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-scripts" (OuterVolumeSpecName: "scripts") pod "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" (UID: "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.311047 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9253fb1e-9dce-4e54-80ee-fba5e3152596-scripts" (OuterVolumeSpecName: "scripts") pod "9253fb1e-9dce-4e54-80ee-fba5e3152596" (UID: "9253fb1e-9dce-4e54-80ee-fba5e3152596"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.321497 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f55a0cf4-44d3-4896-911b-430d13f1f67e-kube-api-access-mwx6h" (OuterVolumeSpecName: "kube-api-access-mwx6h") pod "f55a0cf4-44d3-4896-911b-430d13f1f67e" (UID: "f55a0cf4-44d3-4896-911b-430d13f1f67e"). InnerVolumeSpecName "kube-api-access-mwx6h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.326273 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9253fb1e-9dce-4e54-80ee-fba5e3152596-kube-api-access-4m82w" (OuterVolumeSpecName: "kube-api-access-4m82w") pod "9253fb1e-9dce-4e54-80ee-fba5e3152596" (UID: "9253fb1e-9dce-4e54-80ee-fba5e3152596"). InnerVolumeSpecName "kube-api-access-4m82w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.331394 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-kube-api-access-w2ssq" (OuterVolumeSpecName: "kube-api-access-w2ssq") pod "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" (UID: "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c"). InnerVolumeSpecName "kube-api-access-w2ssq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.350040 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" (UID: "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.387504 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9253fb1e-9dce-4e54-80ee-fba5e3152596-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9253fb1e-9dce-4e54-80ee-fba5e3152596" (UID: "9253fb1e-9dce-4e54-80ee-fba5e3152596"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.399687 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c6909118-b0ce-402c-8bb4-7ce665250739-scripts\") pod \"c6909118-b0ce-402c-8bb4-7ce665250739\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.399752 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6909118-b0ce-402c-8bb4-7ce665250739-config\") pod \"c6909118-b0ce-402c-8bb4-7ce665250739\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.399822 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"c6909118-b0ce-402c-8bb4-7ce665250739\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.399860 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-combined-ca-bundle\") pod \"c6909118-b0ce-402c-8bb4-7ce665250739\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.399935 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k24n8\" (UniqueName: \"kubernetes.io/projected/c6909118-b0ce-402c-8bb4-7ce665250739-kube-api-access-k24n8\") pod \"c6909118-b0ce-402c-8bb4-7ce665250739\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400081 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-ovsdbserver-nb-tls-certs\") pod \"c6909118-b0ce-402c-8bb4-7ce665250739\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400103 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c6909118-b0ce-402c-8bb4-7ce665250739-ovsdb-rundir\") pod \"c6909118-b0ce-402c-8bb4-7ce665250739\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400163 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-metrics-certs-tls-certs\") pod \"c6909118-b0ce-402c-8bb4-7ce665250739\" (UID: \"c6909118-b0ce-402c-8bb4-7ce665250739\") " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400680 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9253fb1e-9dce-4e54-80ee-fba5e3152596-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400697 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2ssq\" (UniqueName: \"kubernetes.io/projected/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-kube-api-access-w2ssq\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400708 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" 
(UniqueName: \"kubernetes.io/configmap/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400718 4910 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400738 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400746 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4m82w\" (UniqueName: \"kubernetes.io/projected/9253fb1e-9dce-4e54-80ee-fba5e3152596-kube-api-access-4m82w\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400755 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400765 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400778 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwx6h\" (UniqueName: \"kubernetes.io/projected/f55a0cf4-44d3-4896-911b-430d13f1f67e-kube-api-access-mwx6h\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400787 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9253fb1e-9dce-4e54-80ee-fba5e3152596-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400802 4910 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9253fb1e-9dce-4e54-80ee-fba5e3152596-var-run\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.400919 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6909118-b0ce-402c-8bb4-7ce665250739-config" (OuterVolumeSpecName: "config") pod "c6909118-b0ce-402c-8bb4-7ce665250739" (UID: "c6909118-b0ce-402c-8bb4-7ce665250739"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.401429 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6909118-b0ce-402c-8bb4-7ce665250739-scripts" (OuterVolumeSpecName: "scripts") pod "c6909118-b0ce-402c-8bb4-7ce665250739" (UID: "c6909118-b0ce-402c-8bb4-7ce665250739"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.408152 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6909118-b0ce-402c-8bb4-7ce665250739-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "c6909118-b0ce-402c-8bb4-7ce665250739" (UID: "c6909118-b0ce-402c-8bb4-7ce665250739"). InnerVolumeSpecName "ovsdb-rundir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.413055 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6909118-b0ce-402c-8bb4-7ce665250739-kube-api-access-k24n8" (OuterVolumeSpecName: "kube-api-access-k24n8") pod "c6909118-b0ce-402c-8bb4-7ce665250739" (UID: "c6909118-b0ce-402c-8bb4-7ce665250739"). InnerVolumeSpecName "kube-api-access-k24n8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.420673 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "c6909118-b0ce-402c-8bb4-7ce665250739" (UID: "c6909118-b0ce-402c-8bb4-7ce665250739"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.442465 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f55a0cf4-44d3-4896-911b-430d13f1f67e" (UID: "f55a0cf4-44d3-4896-911b-430d13f1f67e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.449007 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.449176 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.450006 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.451238 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.451293 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" probeType="Readiness" 
pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovsdb-server" Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.455486 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.457372 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-config" (OuterVolumeSpecName: "config") pod "f55a0cf4-44d3-4896-911b-430d13f1f67e" (UID: "f55a0cf4-44d3-4896-911b-430d13f1f67e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.458906 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.459042 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovs-vswitchd" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.460467 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f55a0cf4-44d3-4896-911b-430d13f1f67e" (UID: "f55a0cf4-44d3-4896-911b-430d13f1f67e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.463944 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f55a0cf4-44d3-4896-911b-430d13f1f67e" (UID: "f55a0cf4-44d3-4896-911b-430d13f1f67e"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.502391 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c6909118-b0ce-402c-8bb4-7ce665250739-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.502413 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c6909118-b0ce-402c-8bb4-7ce665250739-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.502423 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6909118-b0ce-402c-8bb4-7ce665250739-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.502443 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.502454 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.502463 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.502471 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k24n8\" (UniqueName: \"kubernetes.io/projected/c6909118-b0ce-402c-8bb4-7ce665250739-kube-api-access-k24n8\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.502481 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.502489 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.503084 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" (UID: "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.516254 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.543094 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.594901 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f55a0cf4-44d3-4896-911b-430d13f1f67e" (UID: "f55a0cf4-44d3-4896-911b-430d13f1f67e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.604456 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-b7b888cd9-zwrvg"] Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.604666 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.604689 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.604699 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f55a0cf4-44d3-4896-911b-430d13f1f67e-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.604708 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.604704 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-b7b888cd9-zwrvg" podUID="24f2eef4-3eac-4643-bffa-0747afae172a" containerName="proxy-httpd" containerID="cri-o://484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9" gracePeriod=30 Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.604777 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-b7b888cd9-zwrvg" podUID="24f2eef4-3eac-4643-bffa-0747afae172a" containerName="proxy-server" containerID="cri-o://228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30" gracePeriod=30 Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.604790 4910 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.604942 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data podName:b9cedfb5-8c45-434f-b04d-694bf6d600b8 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:25.604924291 +0000 UTC m=+1457.182421961 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data") pod "rabbitmq-cell1-server-0" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8") : configmap "rabbitmq-cell1-config-data" not found Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.612895 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.617238 4910 generic.go:334] "Generic (PLEG): container finished" podID="f9587597-0dcc-4c3a-b578-f9797dd2f9c1" containerID="e582b0a3f3996c075f0d0a3ac06e81dc222960f93f515619da44899ee0b2bce4" exitCode=0 Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.617317 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"f9587597-0dcc-4c3a-b578-f9797dd2f9c1","Type":"ContainerDied","Data":"e582b0a3f3996c075f0d0a3ac06e81dc222960f93f515619da44899ee0b2bce4"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.619268 4910 generic.go:334] "Generic (PLEG): container finished" podID="cf7e2b20-58e5-4c61-9e50-c1af51acf521" containerID="b5b23f1d39fd87015c972670711fa8663521d44165624ef12201ef9f0c36a505" exitCode=143 Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.619305 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cf7e2b20-58e5-4c61-9e50-c1af51acf521","Type":"ContainerDied","Data":"b5b23f1d39fd87015c972670711fa8663521d44165624ef12201ef9f0c36a505"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.619513 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c6909118-b0ce-402c-8bb4-7ce665250739" (UID: "c6909118-b0ce-402c-8bb4-7ce665250739"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.620813 4910 generic.go:334] "Generic (PLEG): container finished" podID="45acd92f-2e5d-4fc1-8b91-c91f165e786a" containerID="6104667b5ae1cdcd47a597709123b12716141db09f9b433cb838f5a9fceaa70c" exitCode=143 Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.620868 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" event={"ID":"45acd92f-2e5d-4fc1-8b91-c91f165e786a","Type":"ContainerDied","Data":"6104667b5ae1cdcd47a597709123b12716141db09f9b433cb838f5a9fceaa70c"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.623880 4910 generic.go:334] "Generic (PLEG): container finished" podID="19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" containerID="e56637041d9755fd6fda8b6ee2207de4c4a054e4001e101db38b784bf6a8eb7a" exitCode=0 Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.623901 4910 generic.go:334] "Generic (PLEG): container finished" podID="19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" containerID="aa48fa221aab1fca8baf355d7d5b238e363506882c0807675e58c0556680cf81" exitCode=0 Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.623939 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e","Type":"ContainerDied","Data":"e56637041d9755fd6fda8b6ee2207de4c4a054e4001e101db38b784bf6a8eb7a"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.623959 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e","Type":"ContainerDied","Data":"aa48fa221aab1fca8baf355d7d5b238e363506882c0807675e58c0556680cf81"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.628912 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_206d2077-4a66-4c6d-aa55-6bf0e0f88c2c/ovsdbserver-sb/0.log" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.629004 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"206d2077-4a66-4c6d-aa55-6bf0e0f88c2c","Type":"ContainerDied","Data":"5a454293173b849fd64d11ff249f1e8fb8cfde1bc8277f1a44e5d944fe0b0ac6"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.629053 4910 scope.go:117] "RemoveContainer" containerID="5e44050096e20cb6a25794fd8f53d149477e83b2422b09ae2b60db07b3bfa76e" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.629231 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.630468 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" (UID: "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.633953 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" (UID: "206d2077-4a66-4c6d-aa55-6bf0e0f88c2c"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.635352 4910 generic.go:334] "Generic (PLEG): container finished" podID="cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" containerID="71aa4a23693de64aaa8cbbd15881cbffefc0342e211e96812e957ba634cedcdb" exitCode=143 Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.635472 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" event={"ID":"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b","Type":"ContainerDied","Data":"71aa4a23693de64aaa8cbbd15881cbffefc0342e211e96812e957ba634cedcdb"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.638566 4910 generic.go:334] "Generic (PLEG): container finished" podID="ce8ea9ec-e799-457a-aaca-e16b591bdf0c" containerID="b200e9f40ae5b0a34ae3718175edc6e00f0e7819999c5ddcf7777af1ffb93d24" exitCode=143 Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.638618 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-66897dc6c-9tqxs" event={"ID":"ce8ea9ec-e799-457a-aaca-e16b591bdf0c","Type":"ContainerDied","Data":"b200e9f40ae5b0a34ae3718175edc6e00f0e7819999c5ddcf7777af1ffb93d24"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.640567 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cfp97" event={"ID":"9253fb1e-9dce-4e54-80ee-fba5e3152596","Type":"ContainerDied","Data":"a0c16b818c8e40bedcb653d0eed1d86bbbe46f15ce82243cb48c16eee7b1d32e"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.640645 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-cfp97" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.647879 4910 generic.go:334] "Generic (PLEG): container finished" podID="da2a33ae-86a0-465d-a05e-89007e39e580" containerID="9508ef451ff0f7e73dc0cfea8eda8b03067704bfee4c29361c6f466617631e69" exitCode=0 Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.648493 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"da2a33ae-86a0-465d-a05e-89007e39e580","Type":"ContainerDied","Data":"9508ef451ff0f7e73dc0cfea8eda8b03067704bfee4c29361c6f466617631e69"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.658365 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9253fb1e-9dce-4e54-80ee-fba5e3152596-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "9253fb1e-9dce-4e54-80ee-fba5e3152596" (UID: "9253fb1e-9dce-4e54-80ee-fba5e3152596"). InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.658721 4910 generic.go:334] "Generic (PLEG): container finished" podID="9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a" containerID="b0963465056d457d77ba82b41400764fc535aa116bd7004b3c5f6069bc02b174" exitCode=137 Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.658799 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.662434 4910 generic.go:334] "Generic (PLEG): container finished" podID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerID="a8b2b4d5b559dae71be16c91b6e3ceb8f53c013e2ed93dca2aa9f32d74982c10" exitCode=143 Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.662497 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3486557d-93f8-44c2-b40a-dd8aca19d8e1","Type":"ContainerDied","Data":"a8b2b4d5b559dae71be16c91b6e3ceb8f53c013e2ed93dca2aa9f32d74982c10"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.675775 4910 generic.go:334] "Generic (PLEG): container finished" podID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" exitCode=0 Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.675860 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-9g2kt" event={"ID":"780aad6a-41ff-410c-a6fc-6be2faf38b6f","Type":"ContainerDied","Data":"1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.677956 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-867cd545c7-pd68r" event={"ID":"f55a0cf4-44d3-4896-911b-430d13f1f67e","Type":"ContainerDied","Data":"071b7781d03e5612a0f9721e4576f1cb43f6a2b70be9745539d6632bf90c2e7f"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.678012 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-867cd545c7-pd68r" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.681162 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "c6909118-b0ce-402c-8bb4-7ce665250739" (UID: "c6909118-b0ce-402c-8bb4-7ce665250739"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.685024 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "c6909118-b0ce-402c-8bb4-7ce665250739" (UID: "c6909118-b0ce-402c-8bb4-7ce665250739"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.691652 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="f2769fedd4f026dd164121600d29619d9faa807462e36ef2df370d00a00de88f" exitCode=0 Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.691889 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"f2769fedd4f026dd164121600d29619d9faa807462e36ef2df370d00a00de88f"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.693938 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c6909118-b0ce-402c-8bb4-7ce665250739/ovsdbserver-nb/0.log" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.694155 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.694827 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.694922 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c6909118-b0ce-402c-8bb4-7ce665250739","Type":"ContainerDied","Data":"705c4d3ed11f60e755643b5bfc202d1f6039141c978bf5d2c5c5e34fe09a8aab"} Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.696176 4910 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/root-account-create-update-lnk9j" secret="" err="secret \"galera-openstack-cell1-dockercfg-6b5kw\" not found" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.697597 4910 scope.go:117] "RemoveContainer" containerID="ae292dd58468a3ca3fe41cf2714cbea7c847466d8e283f8fbe34e48f2fc358f9" Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.703385 4910 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 05 22:15:23 crc kubenswrapper[4910]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 05 22:15:23 crc kubenswrapper[4910]: Jan 05 22:15:23 crc kubenswrapper[4910]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 05 22:15:23 crc kubenswrapper[4910]: Jan 05 22:15:23 crc kubenswrapper[4910]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 05 22:15:23 crc kubenswrapper[4910]: Jan 05 22:15:23 crc kubenswrapper[4910]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 05 22:15:23 crc kubenswrapper[4910]: Jan 05 22:15:23 crc kubenswrapper[4910]: if [ -n "" ]; then Jan 05 22:15:23 crc kubenswrapper[4910]: GRANT_DATABASE="" Jan 05 22:15:23 crc kubenswrapper[4910]: else Jan 05 22:15:23 crc kubenswrapper[4910]: GRANT_DATABASE="*" Jan 05 22:15:23 crc kubenswrapper[4910]: fi Jan 05 22:15:23 crc kubenswrapper[4910]: Jan 05 22:15:23 crc kubenswrapper[4910]: # going for maximum compatibility here: Jan 05 22:15:23 crc kubenswrapper[4910]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 05 22:15:23 crc kubenswrapper[4910]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 05 22:15:23 crc kubenswrapper[4910]: # 3. 
Jan 05 22:15:23 crc kubenswrapper[4910]: # support updates
Jan 05 22:15:23 crc kubenswrapper[4910]: 
Jan 05 22:15:23 crc kubenswrapper[4910]: $MYSQL_CMD < logger="UnhandledError"
Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.704812 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-lnk9j" podUID="f509687a-bb68-4247-b4de-0f0cb99ca389"
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.705132 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-openstack-config-secret\") pod \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") "
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.705240 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bg4z\" (UniqueName: \"kubernetes.io/projected/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-kube-api-access-4bg4z\") pod \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") "
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.705315 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-openstack-config\") pod \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") "
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.705483 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-combined-ca-bundle\") pod \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\" (UID: \"9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a\") "
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.705898 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.705914 4910 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/9253fb1e-9dce-4e54-80ee-fba5e3152596-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.705923 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.705933 4910 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.705942 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.705950 4910 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6909118-b0ce-402c-8bb4-7ce665250739-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.733699 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-kube-api-access-4bg4z" (OuterVolumeSpecName: "kube-api-access-4bg4z") pod "9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a" (UID: "9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a"). InnerVolumeSpecName "kube-api-access-4bg4z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.797654 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-2c36-account-create-update-777vv"]
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.813895 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bg4z\" (UniqueName: \"kubernetes.io/projected/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-kube-api-access-4bg4z\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.816382 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-d6c5d94b9-llc4f"]
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.825767 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a" (UID: "9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.837092 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a" (UID: "9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.841339 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-787f96fcd6-44r4b"]
Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.842715 4910 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 05 22:15:23 crc kubenswrapper[4910]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 05 22:15:23 crc kubenswrapper[4910]: 
Jan 05 22:15:23 crc kubenswrapper[4910]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 05 22:15:23 crc kubenswrapper[4910]: 
Jan 05 22:15:23 crc kubenswrapper[4910]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 05 22:15:23 crc kubenswrapper[4910]: 
Jan 05 22:15:23 crc kubenswrapper[4910]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 05 22:15:23 crc kubenswrapper[4910]: 
Jan 05 22:15:23 crc kubenswrapper[4910]: if [ -n "glance" ]; then
Jan 05 22:15:23 crc kubenswrapper[4910]: GRANT_DATABASE="glance"
Jan 05 22:15:23 crc kubenswrapper[4910]: else
Jan 05 22:15:23 crc kubenswrapper[4910]: GRANT_DATABASE="*"
Jan 05 22:15:23 crc kubenswrapper[4910]: fi
Jan 05 22:15:23 crc kubenswrapper[4910]: 
Jan 05 22:15:23 crc kubenswrapper[4910]: # going for maximum compatibility here:
Jan 05 22:15:23 crc kubenswrapper[4910]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 05 22:15:23 crc kubenswrapper[4910]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 05 22:15:23 crc kubenswrapper[4910]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 05 22:15:23 crc kubenswrapper[4910]: # support updates
Jan 05 22:15:23 crc kubenswrapper[4910]: 
Jan 05 22:15:23 crc kubenswrapper[4910]: $MYSQL_CMD < logger="UnhandledError"
Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.843846 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"glance-db-secret\\\" not found\"" pod="openstack/glance-2c36-account-create-update-777vv" podUID="04ef3843-8448-4842-aaf3-7e2bcc428122"
Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.854025 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-1a42-account-create-update-4vz6m"]
Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.873634 4910 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 05 22:15:23 crc kubenswrapper[4910]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 05 22:15:23 crc kubenswrapper[4910]: 
Jan 05 22:15:23 crc kubenswrapper[4910]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 05 22:15:23 crc kubenswrapper[4910]: 
Jan 05 22:15:23 crc kubenswrapper[4910]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 05 22:15:23 crc kubenswrapper[4910]: 
Jan 05 22:15:23 crc kubenswrapper[4910]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 05 22:15:23 crc kubenswrapper[4910]: 
Jan 05 22:15:23 crc kubenswrapper[4910]: if [ -n "barbican" ]; then
Jan 05 22:15:23 crc kubenswrapper[4910]: GRANT_DATABASE="barbican"
GRANT_DATABASE="barbican" Jan 05 22:15:23 crc kubenswrapper[4910]: else Jan 05 22:15:23 crc kubenswrapper[4910]: GRANT_DATABASE="*" Jan 05 22:15:23 crc kubenswrapper[4910]: fi Jan 05 22:15:23 crc kubenswrapper[4910]: Jan 05 22:15:23 crc kubenswrapper[4910]: # going for maximum compatibility here: Jan 05 22:15:23 crc kubenswrapper[4910]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 05 22:15:23 crc kubenswrapper[4910]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 05 22:15:23 crc kubenswrapper[4910]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 05 22:15:23 crc kubenswrapper[4910]: # support updates Jan 05 22:15:23 crc kubenswrapper[4910]: Jan 05 22:15:23 crc kubenswrapper[4910]: $MYSQL_CMD < logger="UnhandledError" Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.876310 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"barbican-db-secret\\\" not found\"" pod="openstack/barbican-1a42-account-create-update-4vz6m" podUID="e63178b0-da1f-4d9c-b680-9fdddcd51b9a" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.880737 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a" (UID: "9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.916100 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.916609 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.916694 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.916285 4910 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 05 22:15:23 crc kubenswrapper[4910]: E0105 22:15:23.916924 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts podName:f509687a-bb68-4247-b4de-0f0cb99ca389 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:25.916900583 +0000 UTC m=+1457.494398253 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts") pod "root-account-create-update-lnk9j" (UID: "f509687a-bb68-4247-b4de-0f0cb99ca389") : configmap "openstack-cell1-scripts" not found Jan 05 22:15:23 crc kubenswrapper[4910]: I0105 22:15:23.994288 4910 scope.go:117] "RemoveContainer" containerID="367036c1944402d903c48f5737322433b3dce3b8986e5bf1249815eb02e56af6" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.020754 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtrfx\" (UniqueName: \"kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx\") pod \"barbican-api-6b8d97d96d-jbcrk\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.021032 4910 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.021106 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data podName:7e2a3efd-2de7-493e-af91-900b224e5313 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:28.021086849 +0000 UTC m=+1459.598584519 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data") pod "rabbitmq-server-0" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313") : configmap "rabbitmq-config-data" not found Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.021207 4910 secret.go:188] Couldn't get secret openstack/barbican-config-data: secret "barbican-config-data" not found Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.021291 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data podName:3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:28.021254323 +0000 UTC m=+1459.598751993 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data") pod "barbican-api-6b8d97d96d-jbcrk" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69") : secret "barbican-config-data" not found Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.024044 4910 projected.go:194] Error preparing data for projected volume kube-api-access-jtrfx for pod openstack/barbican-api-6b8d97d96d-jbcrk: failed to fetch token: serviceaccounts "barbican-barbican" not found Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.024141 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx podName:3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:28.024106574 +0000 UTC m=+1459.601604234 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-jtrfx" (UniqueName: "kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx") pod "barbican-api-6b8d97d96d-jbcrk" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69") : failed to fetch token: serviceaccounts "barbican-barbican" not found Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.058078 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.089442 4910 scope.go:117] "RemoveContainer" containerID="b0963465056d457d77ba82b41400764fc535aa116bd7004b3c5f6069bc02b174" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.113420 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-pd68r"] Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.122467 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-public-tls-certs\") pod \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.122514 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-logs\") pod \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.122594 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-internal-tls-certs\") pod \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.122688 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data\") pod \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.122734 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-combined-ca-bundle\") pod \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.122777 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data-custom\") pod \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\" (UID: \"3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.124233 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-logs" (OuterVolumeSpecName: "logs") pod "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.136215 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-pd68r"] Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.144587 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.147830 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.153733 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.162146 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.164854 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.167365 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data" (OuterVolumeSpecName: "config-data") pod "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.168034 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.168207 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69" (UID: "3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.178340 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.185175 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-cfp97"] Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.185281 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="708b16276678b2822ae86c9c52e58e344dbcf830fd5f034e5d7cb53f881b9997" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.185325 4910 scope.go:117] "RemoveContainer" containerID="b0963465056d457d77ba82b41400764fc535aa116bd7004b3c5f6069bc02b174" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.189883 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0963465056d457d77ba82b41400764fc535aa116bd7004b3c5f6069bc02b174\": container with ID starting with b0963465056d457d77ba82b41400764fc535aa116bd7004b3c5f6069bc02b174 not found: ID does not exist" containerID="b0963465056d457d77ba82b41400764fc535aa116bd7004b3c5f6069bc02b174" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.189931 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0963465056d457d77ba82b41400764fc535aa116bd7004b3c5f6069bc02b174"} err="failed to get container status \"b0963465056d457d77ba82b41400764fc535aa116bd7004b3c5f6069bc02b174\": rpc error: code = NotFound desc = could not find container \"b0963465056d457d77ba82b41400764fc535aa116bd7004b3c5f6069bc02b174\": container with ID starting with b0963465056d457d77ba82b41400764fc535aa116bd7004b3c5f6069bc02b174 not found: ID does not exist" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.189960 4910 scope.go:117] "RemoveContainer" containerID="5d88d5d67f2af076e38a459d8f23e6f3dfd6d4cf06b6347db6a041118cb2daba" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.193592 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-cfp97"] Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.202997 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="708b16276678b2822ae86c9c52e58e344dbcf830fd5f034e5d7cb53f881b9997" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.210104 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="708b16276678b2822ae86c9c52e58e344dbcf830fd5f034e5d7cb53f881b9997" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.210189 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="70694d65-fa64-4667-b1aa-bac50650687c" containerName="nova-cell1-conductor-conductor" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 
22:15:24.212970 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.221495 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.228031 4910 scope.go:117] "RemoveContainer" containerID="92466a799d531b6baec996fb3d13141ee302474468214caed62ddb96fa5208ad" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.228765 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.230710 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.230741 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.230751 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.230759 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.230767 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.230775 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.301980 4910 scope.go:117] "RemoveContainer" containerID="59360022044ca27d9ee5757033d8f3c5a80aee6675d87823287602295063b5ec" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.331668 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-nova-novncproxy-tls-certs\") pod \"da2a33ae-86a0-465d-a05e-89007e39e580\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.331767 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-vencrypt-tls-certs\") pod \"da2a33ae-86a0-465d-a05e-89007e39e580\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.331799 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-config-data-custom\") pod \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") " Jan 05 
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.331860 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-kolla-config\") pod \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.331896 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-config-data-generated\") pod \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.331932 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-config-data\") pod \"da2a33ae-86a0-465d-a05e-89007e39e580\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.331964 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-operator-scripts\") pod \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.331996 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-config-data\") pod \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.332015 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wsm8w\" (UniqueName: \"kubernetes.io/projected/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-kube-api-access-wsm8w\") pod \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.332044 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-galera-tls-certs\") pod \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.332077 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wx72l\" (UniqueName: \"kubernetes.io/projected/da2a33ae-86a0-465d-a05e-89007e39e580-kube-api-access-wx72l\") pod \"da2a33ae-86a0-465d-a05e-89007e39e580\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.332130 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-combined-ca-bundle\") pod \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.332184 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-scripts\") pod \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.332203 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-combined-ca-bundle\") pod \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.332224 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-config-data-default\") pod \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.332259 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x49n4\" (UniqueName: \"kubernetes.io/projected/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-kube-api-access-x49n4\") pod \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\" (UID: \"f9587597-0dcc-4c3a-b578-f9797dd2f9c1\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.332280 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-etc-machine-id\") pod \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\" (UID: \"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.332306 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-combined-ca-bundle\") pod \"da2a33ae-86a0-465d-a05e-89007e39e580\" (UID: \"da2a33ae-86a0-465d-a05e-89007e39e580\") "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.332802 4910 scope.go:117] "RemoveContainer" containerID="839ae5ea9fbfa6a3aa2a1bb5b86ebdc2961253bc0eb7f9f7fe77393230b78a2e"
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.334306 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "f9587597-0dcc-4c3a-b578-f9797dd2f9c1" (UID: "f9587597-0dcc-4c3a-b578-f9797dd2f9c1"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.334617 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "f9587597-0dcc-4c3a-b578-f9797dd2f9c1" (UID: "f9587597-0dcc-4c3a-b578-f9797dd2f9c1"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.334734 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "f9587597-0dcc-4c3a-b578-f9797dd2f9c1" (UID: "f9587597-0dcc-4c3a-b578-f9797dd2f9c1"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.334782 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" (UID: "19d63cd6-26c3-439b-a9f6-5a53f27d9e0e"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.337128 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f9587597-0dcc-4c3a-b578-f9797dd2f9c1" (UID: "f9587597-0dcc-4c3a-b578-f9797dd2f9c1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.338992 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-kube-api-access-wsm8w" (OuterVolumeSpecName: "kube-api-access-wsm8w") pod "19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" (UID: "19d63cd6-26c3-439b-a9f6-5a53f27d9e0e"). InnerVolumeSpecName "kube-api-access-wsm8w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.341285 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-kube-api-access-x49n4" (OuterVolumeSpecName: "kube-api-access-x49n4") pod "f9587597-0dcc-4c3a-b578-f9797dd2f9c1" (UID: "f9587597-0dcc-4c3a-b578-f9797dd2f9c1"). InnerVolumeSpecName "kube-api-access-x49n4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.341939 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-scripts" (OuterVolumeSpecName: "scripts") pod "19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" (UID: "19d63cd6-26c3-439b-a9f6-5a53f27d9e0e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.382948 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da2a33ae-86a0-465d-a05e-89007e39e580-kube-api-access-wx72l" (OuterVolumeSpecName: "kube-api-access-wx72l") pod "da2a33ae-86a0-465d-a05e-89007e39e580" (UID: "da2a33ae-86a0-465d-a05e-89007e39e580"). InnerVolumeSpecName "kube-api-access-wx72l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.395290 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "mysql-db") pod "f9587597-0dcc-4c3a-b578-f9797dd2f9c1" (UID: "f9587597-0dcc-4c3a-b578-f9797dd2f9c1"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.400256 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" (UID: "19d63cd6-26c3-439b-a9f6-5a53f27d9e0e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.405198 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="7e2a3efd-2de7-493e-af91-900b224e5313" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.100:5671: connect: connection refused"
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.436053 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.436096 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" "
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.436112 4910 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-kolla-config\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.436143 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-config-data-generated\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.436164 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.436176 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wsm8w\" (UniqueName: \"kubernetes.io/projected/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-kube-api-access-wsm8w\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.436188 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wx72l\" (UniqueName: \"kubernetes.io/projected/da2a33ae-86a0-465d-a05e-89007e39e580-kube-api-access-wx72l\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.436198 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-scripts\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.436212 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-config-data-default\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.436220 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x49n4\" (UniqueName: \"kubernetes.io/projected/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-kube-api-access-x49n4\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:24
crc kubenswrapper[4910]: I0105 22:15:24.436228 4910 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.459473 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-lhd97"] Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.459906 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" containerName="ovsdbserver-sb" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.459930 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" containerName="ovsdbserver-sb" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.459943 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f817e58c-a8aa-4f0d-8486-153659100a11" containerName="ovn-northd" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.459950 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f817e58c-a8aa-4f0d-8486-153659100a11" containerName="ovn-northd" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.459962 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f55a0cf4-44d3-4896-911b-430d13f1f67e" containerName="init" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.459969 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f55a0cf4-44d3-4896-911b-430d13f1f67e" containerName="init" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.459981 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" containerName="cinder-scheduler" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.459987 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" containerName="cinder-scheduler" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.459998 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="266ffadc-b889-4089-9779-c64623269d42" containerName="openstack-network-exporter" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460003 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="266ffadc-b889-4089-9779-c64623269d42" containerName="openstack-network-exporter" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.460016 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da2a33ae-86a0-465d-a05e-89007e39e580" containerName="nova-cell1-novncproxy-novncproxy" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460023 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="da2a33ae-86a0-465d-a05e-89007e39e580" containerName="nova-cell1-novncproxy-novncproxy" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.460040 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" containerName="openstack-network-exporter" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460045 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" containerName="openstack-network-exporter" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.460056 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6909118-b0ce-402c-8bb4-7ce665250739" containerName="openstack-network-exporter" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460062 4910 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="c6909118-b0ce-402c-8bb4-7ce665250739" containerName="openstack-network-exporter" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.460068 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f55a0cf4-44d3-4896-911b-430d13f1f67e" containerName="dnsmasq-dns" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460074 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f55a0cf4-44d3-4896-911b-430d13f1f67e" containerName="dnsmasq-dns" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.460086 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9587597-0dcc-4c3a-b578-f9797dd2f9c1" containerName="mysql-bootstrap" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460093 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9587597-0dcc-4c3a-b578-f9797dd2f9c1" containerName="mysql-bootstrap" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.460103 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9253fb1e-9dce-4e54-80ee-fba5e3152596" containerName="ovn-controller" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460108 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9253fb1e-9dce-4e54-80ee-fba5e3152596" containerName="ovn-controller" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.460138 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f817e58c-a8aa-4f0d-8486-153659100a11" containerName="openstack-network-exporter" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460144 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f817e58c-a8aa-4f0d-8486-153659100a11" containerName="openstack-network-exporter" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.460149 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" containerName="probe" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460157 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" containerName="probe" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.460169 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9587597-0dcc-4c3a-b578-f9797dd2f9c1" containerName="galera" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460174 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9587597-0dcc-4c3a-b578-f9797dd2f9c1" containerName="galera" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.460189 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6909118-b0ce-402c-8bb4-7ce665250739" containerName="ovsdbserver-nb" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460194 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6909118-b0ce-402c-8bb4-7ce665250739" containerName="ovsdbserver-nb" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460488 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" containerName="ovsdbserver-sb" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460506 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="da2a33ae-86a0-465d-a05e-89007e39e580" containerName="nova-cell1-novncproxy-novncproxy" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460518 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="9253fb1e-9dce-4e54-80ee-fba5e3152596" containerName="ovn-controller" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460525 4910 
memory_manager.go:354] "RemoveStaleState removing state" podUID="19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" containerName="probe" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460536 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" containerName="cinder-scheduler" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460543 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6909118-b0ce-402c-8bb4-7ce665250739" containerName="openstack-network-exporter" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460554 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" containerName="openstack-network-exporter" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460564 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="266ffadc-b889-4089-9779-c64623269d42" containerName="openstack-network-exporter" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460573 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9587597-0dcc-4c3a-b578-f9797dd2f9c1" containerName="galera" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460583 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6909118-b0ce-402c-8bb4-7ce665250739" containerName="ovsdbserver-nb" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460593 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f817e58c-a8aa-4f0d-8486-153659100a11" containerName="ovn-northd" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460603 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f817e58c-a8aa-4f0d-8486-153659100a11" containerName="openstack-network-exporter" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.460610 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f55a0cf4-44d3-4896-911b-430d13f1f67e" containerName="dnsmasq-dns" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.461263 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-lhd97" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.465724 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-b0f9-account-create-update-f5pmg"] Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.468387 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.502189 4910 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 05 22:15:24 crc kubenswrapper[4910]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 05 22:15:24 crc kubenswrapper[4910]: Jan 05 22:15:24 crc kubenswrapper[4910]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 05 22:15:24 crc kubenswrapper[4910]: Jan 05 22:15:24 crc kubenswrapper[4910]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 05 22:15:24 crc kubenswrapper[4910]: Jan 05 22:15:24 crc kubenswrapper[4910]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 05 22:15:24 crc kubenswrapper[4910]: Jan 05 22:15:24 crc kubenswrapper[4910]: if [ -n "nova_api" ]; then Jan 05 22:15:24 crc kubenswrapper[4910]: GRANT_DATABASE="nova_api" Jan 05 22:15:24 crc kubenswrapper[4910]: else Jan 05 22:15:24 crc kubenswrapper[4910]: GRANT_DATABASE="*" Jan 05 22:15:24 crc kubenswrapper[4910]: fi Jan 05 22:15:24 crc kubenswrapper[4910]: Jan 05 22:15:24 crc kubenswrapper[4910]: # going for maximum compatibility here: Jan 05 22:15:24 crc kubenswrapper[4910]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 05 22:15:24 crc kubenswrapper[4910]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 05 22:15:24 crc kubenswrapper[4910]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 05 22:15:24 crc kubenswrapper[4910]: # support updates Jan 05 22:15:24 crc kubenswrapper[4910]: Jan 05 22:15:24 crc kubenswrapper[4910]: $MYSQL_CMD < logger="UnhandledError" Jan 05 22:15:24 crc kubenswrapper[4910]: E0105 22:15:24.503695 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-api-db-secret\\\" not found\"" pod="openstack/nova-api-b0f9-account-create-update-f5pmg" podUID="8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.519373 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-lhd97"] Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.541498 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4txxg\" (UniqueName: \"kubernetes.io/projected/2210f0ce-43b3-4560-84f8-b56a65414758-kube-api-access-4txxg\") pod \"root-account-create-update-lhd97\" (UID: \"2210f0ce-43b3-4560-84f8-b56a65414758\") " pod="openstack/root-account-create-update-lhd97" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.541954 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2210f0ce-43b3-4560-84f8-b56a65414758-operator-scripts\") pod \"root-account-create-update-lhd97\" (UID: \"2210f0ce-43b3-4560-84f8-b56a65414758\") " pod="openstack/root-account-create-update-lhd97" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.614344 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-config-data" (OuterVolumeSpecName: "config-data") pod "da2a33ae-86a0-465d-a05e-89007e39e580" (UID: "da2a33ae-86a0-465d-a05e-89007e39e580"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.633871 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f9587597-0dcc-4c3a-b578-f9797dd2f9c1" (UID: "f9587597-0dcc-4c3a-b578-f9797dd2f9c1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.647858 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4txxg\" (UniqueName: \"kubernetes.io/projected/2210f0ce-43b3-4560-84f8-b56a65414758-kube-api-access-4txxg\") pod \"root-account-create-update-lhd97\" (UID: \"2210f0ce-43b3-4560-84f8-b56a65414758\") " pod="openstack/root-account-create-update-lhd97" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.649153 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2210f0ce-43b3-4560-84f8-b56a65414758-operator-scripts\") pod \"root-account-create-update-lhd97\" (UID: \"2210f0ce-43b3-4560-84f8-b56a65414758\") " pod="openstack/root-account-create-update-lhd97" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.649319 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.649386 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.650961 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2210f0ce-43b3-4560-84f8-b56a65414758-operator-scripts\") pod \"root-account-create-update-lhd97\" (UID: \"2210f0ce-43b3-4560-84f8-b56a65414758\") " pod="openstack/root-account-create-update-lhd97" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.671545 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4txxg\" (UniqueName: \"kubernetes.io/projected/2210f0ce-43b3-4560-84f8-b56a65414758-kube-api-access-4txxg\") pod \"root-account-create-update-lhd97\" (UID: \"2210f0ce-43b3-4560-84f8-b56a65414758\") " pod="openstack/root-account-create-update-lhd97" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.680362 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "da2a33ae-86a0-465d-a05e-89007e39e580" (UID: "da2a33ae-86a0-465d-a05e-89007e39e580"). InnerVolumeSpecName "vencrypt-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.682607 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da2a33ae-86a0-465d-a05e-89007e39e580" (UID: "da2a33ae-86a0-465d-a05e-89007e39e580"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.693087 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.703101 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="07efd759-c536-425d-938e-a8ccd41706cd" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.164:8776/healthcheck\": read tcp 10.217.0.2:59260->10.217.0.164:8776: read: connection reset by peer" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.710295 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-2c36-account-create-update-777vv" event={"ID":"04ef3843-8448-4842-aaf3-7e2bcc428122","Type":"ContainerStarted","Data":"e7dd0d0430e868fa89ae86c6a1ba97b2a5e130023f1320d7bea349173d16f570"} Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.712405 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.750856 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24f2eef4-3eac-4643-bffa-0747afae172a-log-httpd\") pod \"24f2eef4-3eac-4643-bffa-0747afae172a\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.750926 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/24f2eef4-3eac-4643-bffa-0747afae172a-etc-swift\") pod \"24f2eef4-3eac-4643-bffa-0747afae172a\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.750995 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-combined-ca-bundle\") pod \"24f2eef4-3eac-4643-bffa-0747afae172a\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.751057 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-internal-tls-certs\") pod \"24f2eef4-3eac-4643-bffa-0747afae172a\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.751137 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-config-data\") pod \"24f2eef4-3eac-4643-bffa-0747afae172a\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.751158 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6p9tt\" (UniqueName: \"kubernetes.io/projected/24f2eef4-3eac-4643-bffa-0747afae172a-kube-api-access-6p9tt\") pod \"24f2eef4-3eac-4643-bffa-0747afae172a\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.751189 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-public-tls-certs\") pod \"24f2eef4-3eac-4643-bffa-0747afae172a\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.751277 4910 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24f2eef4-3eac-4643-bffa-0747afae172a-run-httpd\") pod \"24f2eef4-3eac-4643-bffa-0747afae172a\" (UID: \"24f2eef4-3eac-4643-bffa-0747afae172a\") " Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.751749 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.751764 4910 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.751772 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.752068 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24f2eef4-3eac-4643-bffa-0747afae172a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "24f2eef4-3eac-4643-bffa-0747afae172a" (UID: "24f2eef4-3eac-4643-bffa-0747afae172a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.752380 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24f2eef4-3eac-4643-bffa-0747afae172a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "24f2eef4-3eac-4643-bffa-0747afae172a" (UID: "24f2eef4-3eac-4643-bffa-0747afae172a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.758144 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.763929 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="206d2077-4a66-4c6d-aa55-6bf0e0f88c2c" path="/var/lib/kubelet/pods/206d2077-4a66-4c6d-aa55-6bf0e0f88c2c/volumes" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.765743 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="266ffadc-b889-4089-9779-c64623269d42" path="/var/lib/kubelet/pods/266ffadc-b889-4089-9779-c64623269d42/volumes" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.769868 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="656781fb-f17c-4ea5-b35c-38d7639eb605" path="/var/lib/kubelet/pods/656781fb-f17c-4ea5-b35c-38d7639eb605/volumes" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.770595 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9253fb1e-9dce-4e54-80ee-fba5e3152596" path="/var/lib/kubelet/pods/9253fb1e-9dce-4e54-80ee-fba5e3152596/volumes" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.771333 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a" path="/var/lib/kubelet/pods/9e934b0f-784d-4e7b-8c78-aa5d7e35aa0a/volumes" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.777149 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6909118-b0ce-402c-8bb4-7ce665250739" path="/var/lib/kubelet/pods/c6909118-b0ce-402c-8bb4-7ce665250739/volumes" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.778410 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-lhd97" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.778488 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f55a0cf4-44d3-4896-911b-430d13f1f67e" path="/var/lib/kubelet/pods/f55a0cf4-44d3-4896-911b-430d13f1f67e/volumes" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.779169 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="b9cedfb5-8c45-434f-b04d-694bf6d600b8" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.779567 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-d6c5d94b9-llc4f" podUID="dc0e5b95-8658-440f-8771-c67a74098057" containerName="barbican-worker-log" containerID="cri-o://dd028682666330bff13245ee6ff70f7e9c71b736d8ba15cebcc6d55a428021f4" gracePeriod=30 Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.780309 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f817e58c-a8aa-4f0d-8486-153659100a11" path="/var/lib/kubelet/pods/f817e58c-a8aa-4f0d-8486-153659100a11/volumes" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.780434 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-d6c5d94b9-llc4f" podUID="dc0e5b95-8658-440f-8771-c67a74098057" containerName="barbican-worker" containerID="cri-o://bc66ebaca647091cd90204976a51f83ad49f72d78ba556dafe73a5c164210302" gracePeriod=30 Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.801719 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.808652 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-d6c5d94b9-llc4f" podStartSLOduration=5.808631575 podStartE2EDuration="5.808631575s" podCreationTimestamp="2026-01-05 22:15:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:15:24.801453647 +0000 UTC m=+1456.378951317" watchObservedRunningTime="2026-01-05 22:15:24.808631575 +0000 UTC m=+1456.386129245" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.809390 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24f2eef4-3eac-4643-bffa-0747afae172a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "24f2eef4-3eac-4643-bffa-0747afae172a" (UID: "24f2eef4-3eac-4643-bffa-0747afae172a"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.813798 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24f2eef4-3eac-4643-bffa-0747afae172a-kube-api-access-6p9tt" (OuterVolumeSpecName: "kube-api-access-6p9tt") pod "24f2eef4-3eac-4643-bffa-0747afae172a" (UID: "24f2eef4-3eac-4643-bffa-0747afae172a"). InnerVolumeSpecName "kube-api-access-6p9tt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.825508 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "f9587597-0dcc-4c3a-b578-f9797dd2f9c1" (UID: "f9587597-0dcc-4c3a-b578-f9797dd2f9c1"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.829438 4910 generic.go:334] "Generic (PLEG): container finished" podID="24f2eef4-3eac-4643-bffa-0747afae172a" containerID="228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30" exitCode=0 Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.829486 4910 generic.go:334] "Generic (PLEG): container finished" podID="24f2eef4-3eac-4643-bffa-0747afae172a" containerID="484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9" exitCode=0 Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.830185 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-b7b888cd9-zwrvg" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.841132 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" (UID: "19d63cd6-26c3-439b-a9f6-5a53f27d9e0e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.858334 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6p9tt\" (UniqueName: \"kubernetes.io/projected/24f2eef4-3eac-4643-bffa-0747afae172a-kube-api-access-6p9tt\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.858365 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24f2eef4-3eac-4643-bffa-0747afae172a-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.858377 4910 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9587597-0dcc-4c3a-b578-f9797dd2f9c1-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.858389 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24f2eef4-3eac-4643-bffa-0747afae172a-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.858403 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.858564 4910 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/24f2eef4-3eac-4643-bffa-0747afae172a-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.880022 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.915387 4910 generic.go:334] "Generic (PLEG): container finished" podID="70694d65-fa64-4667-b1aa-bac50650687c" containerID="708b16276678b2822ae86c9c52e58e344dbcf830fd5f034e5d7cb53f881b9997" exitCode=0 Jan 05 22:15:24 crc kubenswrapper[4910]: I0105 22:15:24.915540 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6b8d97d96d-jbcrk" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.083778 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "da2a33ae-86a0-465d-a05e-89007e39e580" (UID: "da2a33ae-86a0-465d-a05e-89007e39e580"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.132310 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "24f2eef4-3eac-4643-bffa-0747afae172a" (UID: "24f2eef4-3eac-4643-bffa-0747afae172a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.146552 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-config-data" (OuterVolumeSpecName: "config-data") pod "19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" (UID: "19d63cd6-26c3-439b-a9f6-5a53f27d9e0e"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.156371 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-config-data" (OuterVolumeSpecName: "config-data") pod "24f2eef4-3eac-4643-bffa-0747afae172a" (UID: "24f2eef4-3eac-4643-bffa-0747afae172a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.164208 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "24f2eef4-3eac-4643-bffa-0747afae172a" (UID: "24f2eef4-3eac-4643-bffa-0747afae172a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.164246 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "24f2eef4-3eac-4643-bffa-0747afae172a" (UID: "24f2eef4-3eac-4643-bffa-0747afae172a"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.174725 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.174768 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.174778 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.174788 4910 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/da2a33ae-86a0-465d-a05e-89007e39e580-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.174799 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.174809 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/24f2eef4-3eac-4643-bffa-0747afae172a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.592573 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"f9587597-0dcc-4c3a-b578-f9797dd2f9c1","Type":"ContainerDied","Data":"dc39a73c9df32694a5df7333758fce61184ece2686dca01fd7a78be87892ad34"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.592925 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.592947 4910 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.592962 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-d6c5d94b9-llc4f" event={"ID":"dc0e5b95-8658-440f-8771-c67a74098057","Type":"ContainerStarted","Data":"bc66ebaca647091cd90204976a51f83ad49f72d78ba556dafe73a5c164210302"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.592973 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-d6c5d94b9-llc4f" event={"ID":"dc0e5b95-8658-440f-8771-c67a74098057","Type":"ContainerStarted","Data":"dd028682666330bff13245ee6ff70f7e9c71b736d8ba15cebcc6d55a428021f4"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.592982 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-d6c5d94b9-llc4f" event={"ID":"dc0e5b95-8658-440f-8771-c67a74098057","Type":"ContainerStarted","Data":"ecf35f7f9d49d337aabbfdf38a5e83601e140aff6e3b0beb4c263e559fcce34f"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.592992 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"da2a33ae-86a0-465d-a05e-89007e39e580","Type":"ContainerDied","Data":"eb8afa201c5865f37d529095414b4f20fa5054a5e70dda0c19928897a48322b6"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.593005 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b0f9-account-create-update-f5pmg" event={"ID":"8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07","Type":"ContainerStarted","Data":"5c5f88a8b3403353a689e87e9b5123382746270dd507c4cfd96ca043cbf78c77"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.593016 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-b7b888cd9-zwrvg" event={"ID":"24f2eef4-3eac-4643-bffa-0747afae172a","Type":"ContainerDied","Data":"228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.593029 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-b7b888cd9-zwrvg" event={"ID":"24f2eef4-3eac-4643-bffa-0747afae172a","Type":"ContainerDied","Data":"484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.593039 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-b7b888cd9-zwrvg" event={"ID":"24f2eef4-3eac-4643-bffa-0747afae172a","Type":"ContainerDied","Data":"0ae3b996825a08cf127b86f2d77bfd5104866a9631526f454e3977b0c8e86d32"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.593051 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" event={"ID":"244c7b09-d3d9-4ae7-864b-ff6758b0de6a","Type":"ContainerStarted","Data":"5f5f3c9fc2640058d48c86ff68f7c8aa4847482f6977cae797955ee8c5bef11c"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.593061 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" event={"ID":"244c7b09-d3d9-4ae7-864b-ff6758b0de6a","Type":"ContainerStarted","Data":"2d3d0b5d3ef3473593653705d6b414ce761afb9c29ef45bc74b7b13a9dca29d4"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.593071 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"19d63cd6-26c3-439b-a9f6-5a53f27d9e0e","Type":"ContainerDied","Data":"318b85531003e11d9a7b6e74e370ab105779b4933140d0bdb6ccec5e465ce41c"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.593083 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1a42-account-create-update-4vz6m" event={"ID":"e63178b0-da1f-4d9c-b680-9fdddcd51b9a","Type":"ContainerStarted","Data":"7aafa0cd8b6ea05765d153dbf5b69174a4161982467506c6ba1771fefee84fff"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.593092 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"70694d65-fa64-4667-b1aa-bac50650687c","Type":"ContainerDied","Data":"708b16276678b2822ae86c9c52e58e344dbcf830fd5f034e5d7cb53f881b9997"} Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.593335 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="b651f520-1463-434f-b16f-edd2b1b8f8d9" containerName="kube-state-metrics" containerID="cri-o://7eb793854dd2ca885d20aea4858a9baa5dd3b5bf64d04ad614881d8b63b82097" gracePeriod=30 Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.593459 4910 scope.go:117] "RemoveContainer" containerID="e582b0a3f3996c075f0d0a3ac06e81dc222960f93f515619da44899ee0b2bce4" Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.593655 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="ceilometer-central-agent" containerID="cri-o://e2d862c3152a2babe7e6e933e033e365153addf3d9f0e0a5bfdf820d3c653e68" gracePeriod=30 Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.594354 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="proxy-httpd" containerID="cri-o://6b6e40e5636a9a405ca504456fe3ac469d0bf34f5f35ee789bc2b3c7fd43ed8f" gracePeriod=30 Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.594510 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="sg-core" containerID="cri-o://f4147284652162acb83f98dd4b38c821d03f77ed60c1b2b0c22836ec39ba3492" gracePeriod=30 Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.594557 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="ceilometer-notification-agent" containerID="cri-o://b20fb279b37814b85e4faff0ba6be368ee421956a3846dd7b5ccb446665cf296" gracePeriod=30 Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.641314 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.641604 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="39608078-4c49-4ca6-b9d4-6cdd37d89f91" containerName="memcached" containerID="cri-o://8117e13cdc918455769d99f76275cbebcd1d57825a878291492d6665a99db931" gracePeriod=30 Jan 05 22:15:25 crc kubenswrapper[4910]: I0105 22:15:25.654969 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5b71-account-create-update-5bc47"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.669758 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/keystone-5b71-account-create-update-5bc47"] Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:25.702935 4910 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:25.702995 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data podName:b9cedfb5-8c45-434f-b04d-694bf6d600b8 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:29.702979962 +0000 UTC m=+1461.280477632 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data") pod "rabbitmq-cell1-server-0" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8") : configmap "rabbitmq-cell1-config-data" not found Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.712655 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5b71-account-create-update-85zq8"] Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:25.716237 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f2eef4-3eac-4643-bffa-0747afae172a" containerName="proxy-httpd" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.716260 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f2eef4-3eac-4643-bffa-0747afae172a" containerName="proxy-httpd" Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:25.716290 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f2eef4-3eac-4643-bffa-0747afae172a" containerName="proxy-server" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.716298 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f2eef4-3eac-4643-bffa-0747afae172a" containerName="proxy-server" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.716480 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f2eef4-3eac-4643-bffa-0747afae172a" containerName="proxy-httpd" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.716498 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f2eef4-3eac-4643-bffa-0747afae172a" containerName="proxy-server" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.732466 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5b71-account-create-update-85zq8" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.744708 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.784180 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5b71-account-create-update-85zq8"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.821110 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hg6f7\" (UniqueName: \"kubernetes.io/projected/da0001c8-7a9c-47a8-b901-03066bf6a7ff-kube-api-access-hg6f7\") pod \"keystone-5b71-account-create-update-85zq8\" (UID: \"da0001c8-7a9c-47a8-b901-03066bf6a7ff\") " pod="openstack/keystone-5b71-account-create-update-85zq8" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.821315 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da0001c8-7a9c-47a8-b901-03066bf6a7ff-operator-scripts\") pod \"keystone-5b71-account-create-update-85zq8\" (UID: \"da0001c8-7a9c-47a8-b901-03066bf6a7ff\") " pod="openstack/keystone-5b71-account-create-update-85zq8" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.833050 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-wb5hf"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.852244 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-wb5hf"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.886454 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-gs27h"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.905315 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-gs27h"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.923466 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hg6f7\" (UniqueName: \"kubernetes.io/projected/da0001c8-7a9c-47a8-b901-03066bf6a7ff-kube-api-access-hg6f7\") pod \"keystone-5b71-account-create-update-85zq8\" (UID: \"da0001c8-7a9c-47a8-b901-03066bf6a7ff\") " pod="openstack/keystone-5b71-account-create-update-85zq8" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.923930 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da0001c8-7a9c-47a8-b901-03066bf6a7ff-operator-scripts\") pod \"keystone-5b71-account-create-update-85zq8\" (UID: \"da0001c8-7a9c-47a8-b901-03066bf6a7ff\") " pod="openstack/keystone-5b71-account-create-update-85zq8" Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:25.924152 4910 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:25.924253 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts podName:f509687a-bb68-4247-b4de-0f0cb99ca389 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:29.924235433 +0000 UTC m=+1461.501733103 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts") pod "root-account-create-update-lnk9j" (UID: "f509687a-bb68-4247-b4de-0f0cb99ca389") : configmap "openstack-cell1-scripts" not found Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:25.924403 4910 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:25.924486 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/da0001c8-7a9c-47a8-b901-03066bf6a7ff-operator-scripts podName:da0001c8-7a9c-47a8-b901-03066bf6a7ff nodeName:}" failed. No retries permitted until 2026-01-05 22:15:26.424462639 +0000 UTC m=+1458.001960519 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/da0001c8-7a9c-47a8-b901-03066bf6a7ff-operator-scripts") pod "keystone-5b71-account-create-update-85zq8" (UID: "da0001c8-7a9c-47a8-b901-03066bf6a7ff") : configmap "openstack-scripts" not found Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.926903 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.936892 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": dial tcp 10.217.0.201:8775: connect: connection refused" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.936964 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": dial tcp 10.217.0.201:8775: connect: connection refused" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.936995 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-7bbfdb8fcf-zlpw8"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.937208 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-7bbfdb8fcf-zlpw8" podUID="97c873ec-c28a-4121-bac2-98b49c6b42a0" containerName="keystone-api" containerID="cri-o://4e8b2fc70196427c5c99643640fbe7135d80de9a670ca3af9c02eb288b8aa7e3" gracePeriod=30 Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:25.951087 4910 projected.go:194] Error preparing data for projected volume kube-api-access-hg6f7 for pod openstack/keystone-5b71-account-create-update-85zq8: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:25.951170 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/da0001c8-7a9c-47a8-b901-03066bf6a7ff-kube-api-access-hg6f7 podName:da0001c8-7a9c-47a8-b901-03066bf6a7ff nodeName:}" failed. No retries permitted until 2026-01-05 22:15:26.451151841 +0000 UTC m=+1458.028649501 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hg6f7" (UniqueName: "kubernetes.io/projected/da0001c8-7a9c-47a8-b901-03066bf6a7ff-kube-api-access-hg6f7") pod "keystone-5b71-account-create-update-85zq8" (UID: "da0001c8-7a9c-47a8-b901-03066bf6a7ff") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.965490 4910 generic.go:334] "Generic (PLEG): container finished" podID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerID="9f4f5a94d78ccf55b8b88bf158362b3d9f7fee1d51111812e72271a6887b1360" exitCode=0 Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.965575 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3486557d-93f8-44c2-b40a-dd8aca19d8e1","Type":"ContainerDied","Data":"9f4f5a94d78ccf55b8b88bf158362b3d9f7fee1d51111812e72271a6887b1360"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.979174 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-lhd97"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.990906 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-9c2dv"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:25.996183 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-9c2dv"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.015486 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5b71-account-create-update-85zq8"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.020129 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" event={"ID":"244c7b09-d3d9-4ae7-864b-ff6758b0de6a","Type":"ContainerStarted","Data":"54383dd13947802c3b16a778eae08327fa8bdad5b46b80e000c52afe19c07cdc"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.020328 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" podUID="244c7b09-d3d9-4ae7-864b-ff6758b0de6a" containerName="barbican-keystone-listener-log" containerID="cri-o://5f5f3c9fc2640058d48c86ff68f7c8aa4847482f6977cae797955ee8c5bef11c" gracePeriod=30 Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.021025 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" podUID="244c7b09-d3d9-4ae7-864b-ff6758b0de6a" containerName="barbican-keystone-listener" containerID="cri-o://54383dd13947802c3b16a778eae08327fa8bdad5b46b80e000c52afe19c07cdc" gracePeriod=30 Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.060454 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" podStartSLOduration=7.060432673 podStartE2EDuration="7.060432673s" podCreationTimestamp="2026-01-05 22:15:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 22:15:26.054833404 +0000 UTC m=+1457.632331074" watchObservedRunningTime="2026-01-05 22:15:26.060432673 +0000 UTC m=+1457.637930343" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.064321 4910 generic.go:334] "Generic (PLEG): container finished" podID="70100901-0709-4900-ac75-462a85b350c3" containerID="1e10056784aaab7edb53371b1e8ee1b1dfc4d02346c220a12403d46024abfaa4" exitCode=0 Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 
22:15:26.064435 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"70100901-0709-4900-ac75-462a85b350c3","Type":"ContainerDied","Data":"1e10056784aaab7edb53371b1e8ee1b1dfc4d02346c220a12403d46024abfaa4"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.081830 4910 generic.go:334] "Generic (PLEG): container finished" podID="b651f520-1463-434f-b16f-edd2b1b8f8d9" containerID="7eb793854dd2ca885d20aea4858a9baa5dd3b5bf64d04ad614881d8b63b82097" exitCode=2 Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.082174 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"b651f520-1463-434f-b16f-edd2b1b8f8d9","Type":"ContainerDied","Data":"7eb793854dd2ca885d20aea4858a9baa5dd3b5bf64d04ad614881d8b63b82097"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.092002 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1a42-account-create-update-4vz6m" event={"ID":"e63178b0-da1f-4d9c-b680-9fdddcd51b9a","Type":"ContainerDied","Data":"7aafa0cd8b6ea05765d153dbf5b69174a4161982467506c6ba1771fefee84fff"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.092051 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7aafa0cd8b6ea05765d153dbf5b69174a4161982467506c6ba1771fefee84fff" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.095325 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-2c36-account-create-update-777vv" event={"ID":"04ef3843-8448-4842-aaf3-7e2bcc428122","Type":"ContainerDied","Data":"e7dd0d0430e868fa89ae86c6a1ba97b2a5e130023f1320d7bea349173d16f570"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.095362 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7dd0d0430e868fa89ae86c6a1ba97b2a5e130023f1320d7bea349173d16f570" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.121959 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b0f9-account-create-update-f5pmg" event={"ID":"8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07","Type":"ContainerDied","Data":"5c5f88a8b3403353a689e87e9b5123382746270dd507c4cfd96ca043cbf78c77"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.122008 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c5f88a8b3403353a689e87e9b5123382746270dd507c4cfd96ca043cbf78c77" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.134049 4910 generic.go:334] "Generic (PLEG): container finished" podID="07efd759-c536-425d-938e-a8ccd41706cd" containerID="a0e248b48425380302b1988bb335f1102fb9d344cce326d7af9e5dd2f6475bc5" exitCode=0 Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.134136 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"07efd759-c536-425d-938e-a8ccd41706cd","Type":"ContainerDied","Data":"a0e248b48425380302b1988bb335f1102fb9d344cce326d7af9e5dd2f6475bc5"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.134168 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"07efd759-c536-425d-938e-a8ccd41706cd","Type":"ContainerDied","Data":"df4ee361dfedceaf2c81953233c13b2456b83fc8b0f6d991dc2731dd08887cbf"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.134181 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df4ee361dfedceaf2c81953233c13b2456b83fc8b0f6d991dc2731dd08887cbf" Jan 05 22:15:26 crc 
kubenswrapper[4910]: I0105 22:15:26.148185 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-lnk9j" event={"ID":"f509687a-bb68-4247-b4de-0f0cb99ca389","Type":"ContainerDied","Data":"362337c9ef5e7da45f6ff21f34ff3984bb831ae81a7ed1bec943f6f7b14571ec"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.148239 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="362337c9ef5e7da45f6ff21f34ff3984bb831ae81a7ed1bec943f6f7b14571ec" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.157441 4910 generic.go:334] "Generic (PLEG): container finished" podID="45acd92f-2e5d-4fc1-8b91-c91f165e786a" containerID="a005751f16bf05306ffd138b7900c870797084700111340ccf797cab547f6f2e" exitCode=0 Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.157540 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" event={"ID":"45acd92f-2e5d-4fc1-8b91-c91f165e786a","Type":"ContainerDied","Data":"a005751f16bf05306ffd138b7900c870797084700111340ccf797cab547f6f2e"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.170031 4910 generic.go:334] "Generic (PLEG): container finished" podID="dc0e5b95-8658-440f-8771-c67a74098057" containerID="dd028682666330bff13245ee6ff70f7e9c71b736d8ba15cebcc6d55a428021f4" exitCode=143 Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.170105 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-d6c5d94b9-llc4f" event={"ID":"dc0e5b95-8658-440f-8771-c67a74098057","Type":"ContainerDied","Data":"dd028682666330bff13245ee6ff70f7e9c71b736d8ba15cebcc6d55a428021f4"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.171652 4910 generic.go:334] "Generic (PLEG): container finished" podID="b29bf6bd-079e-4e8b-bec6-49d4923676af" containerID="008ff3c44ce49caf6caea7aa9f55cfc608a8d5e702630f035b8953f4de51ddc1" exitCode=0 Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.171692 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7687b85c5d-l8k6w" event={"ID":"b29bf6bd-079e-4e8b-bec6-49d4923676af","Type":"ContainerDied","Data":"008ff3c44ce49caf6caea7aa9f55cfc608a8d5e702630f035b8953f4de51ddc1"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.173576 4910 generic.go:334] "Generic (PLEG): container finished" podID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerID="f4147284652162acb83f98dd4b38c821d03f77ed60c1b2b0c22836ec39ba3492" exitCode=2 Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.173611 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d881977-4280-42f6-8ec5-65be97c8dc28","Type":"ContainerDied","Data":"f4147284652162acb83f98dd4b38c821d03f77ed60c1b2b0c22836ec39ba3492"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.175640 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"70694d65-fa64-4667-b1aa-bac50650687c","Type":"ContainerDied","Data":"efabaebeda908dacb3dd8334719822a571f4a923ae770b61178621243975e318"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.175661 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="efabaebeda908dacb3dd8334719822a571f4a923ae770b61178621243975e318" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.177744 4910 generic.go:334] "Generic (PLEG): container finished" podID="8f43d30e-14e4-4978-bb02-a251305f9330" containerID="e302bde0bc25b21936e7ca65ca2849db5acaa0ddf0792ac1f5ffccee28c53746" exitCode=0 
Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.177765 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f43d30e-14e4-4978-bb02-a251305f9330","Type":"ContainerDied","Data":"e302bde0bc25b21936e7ca65ca2849db5acaa0ddf0792ac1f5ffccee28c53746"} Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.478305 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hg6f7\" (UniqueName: \"kubernetes.io/projected/da0001c8-7a9c-47a8-b901-03066bf6a7ff-kube-api-access-hg6f7\") pod \"keystone-5b71-account-create-update-85zq8\" (UID: \"da0001c8-7a9c-47a8-b901-03066bf6a7ff\") " pod="openstack/keystone-5b71-account-create-update-85zq8" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.478441 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da0001c8-7a9c-47a8-b901-03066bf6a7ff-operator-scripts\") pod \"keystone-5b71-account-create-update-85zq8\" (UID: \"da0001c8-7a9c-47a8-b901-03066bf6a7ff\") " pod="openstack/keystone-5b71-account-create-update-85zq8" Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:26.478606 4910 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:26.478658 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/da0001c8-7a9c-47a8-b901-03066bf6a7ff-operator-scripts podName:da0001c8-7a9c-47a8-b901-03066bf6a7ff nodeName:}" failed. No retries permitted until 2026-01-05 22:15:27.478642172 +0000 UTC m=+1459.056139832 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/da0001c8-7a9c-47a8-b901-03066bf6a7ff-operator-scripts") pod "keystone-5b71-account-create-update-85zq8" (UID: "da0001c8-7a9c-47a8-b901-03066bf6a7ff") : configmap "openstack-scripts" not found Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:26.482921 4910 projected.go:194] Error preparing data for projected volume kube-api-access-hg6f7 for pod openstack/keystone-5b71-account-create-update-85zq8: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:26.483018 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/da0001c8-7a9c-47a8-b901-03066bf6a7ff-kube-api-access-hg6f7 podName:da0001c8-7a9c-47a8-b901-03066bf6a7ff nodeName:}" failed. No retries permitted until 2026-01-05 22:15:27.48299489 +0000 UTC m=+1459.060492560 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-hg6f7" (UniqueName: "kubernetes.io/projected/da0001c8-7a9c-47a8-b901-03066bf6a7ff-kube-api-access-hg6f7") pod "keystone-5b71-account-create-update-85zq8" (UID: "da0001c8-7a9c-47a8-b901-03066bf6a7ff") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.547252 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="2cb18efe-a80d-4657-921d-af4a18ae279d" containerName="galera" containerID="cri-o://631d63a96f64fb0aa20db63e43afb3158c0927307ea2182cd6951a7f9852fdca" gracePeriod=30 Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:26.732511 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 38699171184dfa46b8af02c0e7a8bf314316f1f3e8f7f4d2c59c764a37fae22a is running failed: container process not found" containerID="38699171184dfa46b8af02c0e7a8bf314316f1f3e8f7f4d2c59c764a37fae22a" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:26.733575 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 38699171184dfa46b8af02c0e7a8bf314316f1f3e8f7f4d2c59c764a37fae22a is running failed: container process not found" containerID="38699171184dfa46b8af02c0e7a8bf314316f1f3e8f7f4d2c59c764a37fae22a" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:26.735113 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 38699171184dfa46b8af02c0e7a8bf314316f1f3e8f7f4d2c59c764a37fae22a is running failed: container process not found" containerID="38699171184dfa46b8af02c0e7a8bf314316f1f3e8f7f4d2c59c764a37fae22a" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:26.735228 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 38699171184dfa46b8af02c0e7a8bf314316f1f3e8f7f4d2c59c764a37fae22a is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="de8aafdf-9b35-4c41-8726-6c7e86edee5f" containerName="nova-cell0-conductor-conductor" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.738704 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72da5def-cd27-4431-a7da-04b32457cdb1" path="/var/lib/kubelet/pods/72da5def-cd27-4431-a7da-04b32457cdb1/volumes" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.739967 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80f364a3-6407-463e-9565-a3bb43cb1494" path="/var/lib/kubelet/pods/80f364a3-6407-463e-9565-a3bb43cb1494/volumes" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.741241 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="910bd239-dc3a-47a1-9a25-5e21046f9725" path="/var/lib/kubelet/pods/910bd239-dc3a-47a1-9a25-5e21046f9725/volumes" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.741891 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea2fcf34-e416-43a2-a488-a9e952d19b81" path="/var/lib/kubelet/pods/ea2fcf34-e416-43a2-a488-a9e952d19b81/volumes" Jan 05 22:15:26 crc 
kubenswrapper[4910]: I0105 22:15:26.805184 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.805951 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-lhd97"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.829255 4910 scope.go:117] "RemoveContainer" containerID="7dc2abc97367e4404626e780ecb73ce8187a7fc4d65dc7e368107933d3d9b81a" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.833925 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-2c36-account-create-update-777vv" Jan 05 22:15:26 crc kubenswrapper[4910]: W0105 22:15:26.863307 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2210f0ce_43b3_4560_84f8_b56a65414758.slice/crio-dea86c9f2fc512e486561b6b4a33e3b68be87ab38970e393e203c62d2cfd327c WatchSource:0}: Error finding container dea86c9f2fc512e486561b6b4a33e3b68be87ab38970e393e203c62d2cfd327c: Status 404 returned error can't find the container with id dea86c9f2fc512e486561b6b4a33e3b68be87ab38970e393e203c62d2cfd327c Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.907110 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6b8d97d96d-jbcrk"] Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:26.914267 4910 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 05 22:15:26 crc kubenswrapper[4910]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 05 22:15:26 crc kubenswrapper[4910]: Jan 05 22:15:26 crc kubenswrapper[4910]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 05 22:15:26 crc kubenswrapper[4910]: Jan 05 22:15:26 crc kubenswrapper[4910]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 05 22:15:26 crc kubenswrapper[4910]: Jan 05 22:15:26 crc kubenswrapper[4910]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 05 22:15:26 crc kubenswrapper[4910]: Jan 05 22:15:26 crc kubenswrapper[4910]: if [ -n "" ]; then Jan 05 22:15:26 crc kubenswrapper[4910]: GRANT_DATABASE="" Jan 05 22:15:26 crc kubenswrapper[4910]: else Jan 05 22:15:26 crc kubenswrapper[4910]: GRANT_DATABASE="*" Jan 05 22:15:26 crc kubenswrapper[4910]: fi Jan 05 22:15:26 crc kubenswrapper[4910]: Jan 05 22:15:26 crc kubenswrapper[4910]: # going for maximum compatibility here: Jan 05 22:15:26 crc kubenswrapper[4910]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 05 22:15:26 crc kubenswrapper[4910]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 05 22:15:26 crc kubenswrapper[4910]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 05 22:15:26 crc kubenswrapper[4910]: # support updates Jan 05 22:15:26 crc kubenswrapper[4910]: Jan 05 22:15:26 crc kubenswrapper[4910]: $MYSQL_CMD < logger="UnhandledError" Jan 05 22:15:26 crc kubenswrapper[4910]: E0105 22:15:26.919703 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-lhd97" podUID="2210f0ce-43b3-4560-84f8-b56a65414758" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.930838 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-6b8d97d96d-jbcrk"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.944308 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.955219 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.960568 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b0f9-account-create-update-f5pmg" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.970475 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.974540 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1a42-account-create-update-4vz6m" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.980832 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.986360 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-lnk9j" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.998476 4910 scope.go:117] "RemoveContainer" containerID="9508ef451ff0f7e73dc0cfea8eda8b03067704bfee4c29361c6f466617631e69" Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.998894 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxsxb\" (UniqueName: \"kubernetes.io/projected/70694d65-fa64-4667-b1aa-bac50650687c-kube-api-access-vxsxb\") pod \"70694d65-fa64-4667-b1aa-bac50650687c\" (UID: \"70694d65-fa64-4667-b1aa-bac50650687c\") " Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.999002 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvn7m\" (UniqueName: \"kubernetes.io/projected/04ef3843-8448-4842-aaf3-7e2bcc428122-kube-api-access-dvn7m\") pod \"04ef3843-8448-4842-aaf3-7e2bcc428122\" (UID: \"04ef3843-8448-4842-aaf3-7e2bcc428122\") " Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.999190 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70694d65-fa64-4667-b1aa-bac50650687c-combined-ca-bundle\") pod \"70694d65-fa64-4667-b1aa-bac50650687c\" (UID: \"70694d65-fa64-4667-b1aa-bac50650687c\") " Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.999235 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04ef3843-8448-4842-aaf3-7e2bcc428122-operator-scripts\") pod \"04ef3843-8448-4842-aaf3-7e2bcc428122\" (UID: \"04ef3843-8448-4842-aaf3-7e2bcc428122\") " Jan 05 22:15:26 crc kubenswrapper[4910]: I0105 22:15:26.999294 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70694d65-fa64-4667-b1aa-bac50650687c-config-data\") pod \"70694d65-fa64-4667-b1aa-bac50650687c\" (UID: \"70694d65-fa64-4667-b1aa-bac50650687c\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.000377 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04ef3843-8448-4842-aaf3-7e2bcc428122-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "04ef3843-8448-4842-aaf3-7e2bcc428122" (UID: "04ef3843-8448-4842-aaf3-7e2bcc428122"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.009536 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70694d65-fa64-4667-b1aa-bac50650687c-kube-api-access-vxsxb" (OuterVolumeSpecName: "kube-api-access-vxsxb") pod "70694d65-fa64-4667-b1aa-bac50650687c" (UID: "70694d65-fa64-4667-b1aa-bac50650687c"). InnerVolumeSpecName "kube-api-access-vxsxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.024737 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.027914 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04ef3843-8448-4842-aaf3-7e2bcc428122-kube-api-access-dvn7m" (OuterVolumeSpecName: "kube-api-access-dvn7m") pod "04ef3843-8448-4842-aaf3-7e2bcc428122" (UID: "04ef3843-8448-4842-aaf3-7e2bcc428122"). InnerVolumeSpecName "kube-api-access-dvn7m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.031004 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.039302 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.040333 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.041081 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70694d65-fa64-4667-b1aa-bac50650687c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "70694d65-fa64-4667-b1aa-bac50650687c" (UID: "70694d65-fa64-4667-b1aa-bac50650687c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.042834 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70694d65-fa64-4667-b1aa-bac50650687c-config-data" (OuterVolumeSpecName: "config-data") pod "70694d65-fa64-4667-b1aa-bac50650687c" (UID: "70694d65-fa64-4667-b1aa-bac50650687c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.051018 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-b7b888cd9-zwrvg"] Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.059339 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-b7b888cd9-zwrvg"] Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.064020 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 22:15:27 crc kubenswrapper[4910]: E0105 22:15:27.078437 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-hg6f7 operator-scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/keystone-5b71-account-create-update-85zq8" podUID="da0001c8-7a9c-47a8-b901-03066bf6a7ff" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.090689 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.093647 4910 scope.go:117] "RemoveContainer" containerID="228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.103977 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgdr2\" (UniqueName: \"kubernetes.io/projected/8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07-kube-api-access-dgdr2\") pod \"8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07\" (UID: \"8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.104032 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts\") pod \"f509687a-bb68-4247-b4de-0f0cb99ca389\" (UID: \"f509687a-bb68-4247-b4de-0f0cb99ca389\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.104065 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07-operator-scripts\") pod \"8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07\" (UID: \"8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.104130 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bjtk\" (UniqueName: \"kubernetes.io/projected/f509687a-bb68-4247-b4de-0f0cb99ca389-kube-api-access-8bjtk\") pod \"f509687a-bb68-4247-b4de-0f0cb99ca389\" (UID: \"f509687a-bb68-4247-b4de-0f0cb99ca389\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.104166 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e63178b0-da1f-4d9c-b680-9fdddcd51b9a-operator-scripts\") pod \"e63178b0-da1f-4d9c-b680-9fdddcd51b9a\" (UID: \"e63178b0-da1f-4d9c-b680-9fdddcd51b9a\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.104197 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wcphr\" (UniqueName: \"kubernetes.io/projected/e63178b0-da1f-4d9c-b680-9fdddcd51b9a-kube-api-access-wcphr\") pod \"e63178b0-da1f-4d9c-b680-9fdddcd51b9a\" (UID: \"e63178b0-da1f-4d9c-b680-9fdddcd51b9a\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.104583 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70694d65-fa64-4667-b1aa-bac50650687c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.104610 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04ef3843-8448-4842-aaf3-7e2bcc428122-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.104618 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70694d65-fa64-4667-b1aa-bac50650687c-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.104627 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtrfx\" (UniqueName: \"kubernetes.io/projected/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69-kube-api-access-jtrfx\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 
22:15:27.104636 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxsxb\" (UniqueName: \"kubernetes.io/projected/70694d65-fa64-4667-b1aa-bac50650687c-kube-api-access-vxsxb\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.104645 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvn7m\" (UniqueName: \"kubernetes.io/projected/04ef3843-8448-4842-aaf3-7e2bcc428122-kube-api-access-dvn7m\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.108019 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.108450 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e63178b0-da1f-4d9c-b680-9fdddcd51b9a-kube-api-access-wcphr" (OuterVolumeSpecName: "kube-api-access-wcphr") pod "e63178b0-da1f-4d9c-b680-9fdddcd51b9a" (UID: "e63178b0-da1f-4d9c-b680-9fdddcd51b9a"). InnerVolumeSpecName "kube-api-access-wcphr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.108445 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07" (UID: "8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.109105 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f509687a-bb68-4247-b4de-0f0cb99ca389" (UID: "f509687a-bb68-4247-b4de-0f0cb99ca389"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.109552 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e63178b0-da1f-4d9c-b680-9fdddcd51b9a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e63178b0-da1f-4d9c-b680-9fdddcd51b9a" (UID: "e63178b0-da1f-4d9c-b680-9fdddcd51b9a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.112431 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07-kube-api-access-dgdr2" (OuterVolumeSpecName: "kube-api-access-dgdr2") pod "8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07" (UID: "8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07"). InnerVolumeSpecName "kube-api-access-dgdr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.112939 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f509687a-bb68-4247-b4de-0f0cb99ca389-kube-api-access-8bjtk" (OuterVolumeSpecName: "kube-api-access-8bjtk") pod "f509687a-bb68-4247-b4de-0f0cb99ca389" (UID: "f509687a-bb68-4247-b4de-0f0cb99ca389"). InnerVolumeSpecName "kube-api-access-8bjtk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.114025 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.119410 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.126277 4910 scope.go:117] "RemoveContainer" containerID="484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.127138 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.136471 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.157054 4910 scope.go:117] "RemoveContainer" containerID="228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30" Jan 05 22:15:27 crc kubenswrapper[4910]: E0105 22:15:27.157773 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30\": container with ID starting with 228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30 not found: ID does not exist" containerID="228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.157832 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30"} err="failed to get container status \"228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30\": rpc error: code = NotFound desc = could not find container \"228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30\": container with ID starting with 228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30 not found: ID does not exist" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.157868 4910 scope.go:117] "RemoveContainer" containerID="484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9" Jan 05 22:15:27 crc kubenswrapper[4910]: E0105 22:15:27.161928 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9\": container with ID starting with 484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9 not found: ID does not exist" containerID="484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.161979 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9"} err="failed to get container status \"484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9\": rpc error: code = NotFound desc = could not find container \"484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9\": container with ID starting with 484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9 not found: ID does not exist" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.162012 4910 scope.go:117] 
"RemoveContainer" containerID="228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.162469 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30"} err="failed to get container status \"228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30\": rpc error: code = NotFound desc = could not find container \"228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30\": container with ID starting with 228c36e7a0ee3706097ffefa2fffb06644ad6938f9d1c4b5c163a457e6f37c30 not found: ID does not exist" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.162520 4910 scope.go:117] "RemoveContainer" containerID="484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.162833 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9"} err="failed to get container status \"484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9\": rpc error: code = NotFound desc = could not find container \"484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9\": container with ID starting with 484d4b08e459d7050c6c22231b91516731f4932da1c971f389be1e4993a99ee9 not found: ID does not exist" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.162858 4910 scope.go:117] "RemoveContainer" containerID="e56637041d9755fd6fda8b6ee2207de4c4a054e4001e101db38b784bf6a8eb7a" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.205876 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b29bf6bd-079e-4e8b-bec6-49d4923676af-logs\") pod \"b29bf6bd-079e-4e8b-bec6-49d4923676af\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206109 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-combined-ca-bundle\") pod \"07efd759-c536-425d-938e-a8ccd41706cd\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206204 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-scripts\") pod \"b29bf6bd-079e-4e8b-bec6-49d4923676af\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206287 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrztf\" (UniqueName: \"kubernetes.io/projected/b29bf6bd-079e-4e8b-bec6-49d4923676af-kube-api-access-vrztf\") pod \"b29bf6bd-079e-4e8b-bec6-49d4923676af\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206392 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f43d30e-14e4-4978-bb02-a251305f9330-httpd-run\") pod \"8f43d30e-14e4-4978-bb02-a251305f9330\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206457 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" 
(UniqueName: \"kubernetes.io/host-path/07efd759-c536-425d-938e-a8ccd41706cd-etc-machine-id\") pod \"07efd759-c536-425d-938e-a8ccd41706cd\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206535 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-public-tls-certs\") pod \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206595 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-public-tls-certs\") pod \"07efd759-c536-425d-938e-a8ccd41706cd\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206663 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f43d30e-14e4-4978-bb02-a251305f9330-logs\") pod \"8f43d30e-14e4-4978-bb02-a251305f9330\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206741 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9srqw\" (UniqueName: \"kubernetes.io/projected/3486557d-93f8-44c2-b40a-dd8aca19d8e1-kube-api-access-9srqw\") pod \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206804 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jdb2\" (UniqueName: \"kubernetes.io/projected/8f43d30e-14e4-4978-bb02-a251305f9330-kube-api-access-9jdb2\") pod \"8f43d30e-14e4-4978-bb02-a251305f9330\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206863 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-public-tls-certs\") pod \"b29bf6bd-079e-4e8b-bec6-49d4923676af\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206929 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzmmb\" (UniqueName: \"kubernetes.io/projected/45acd92f-2e5d-4fc1-8b91-c91f165e786a-kube-api-access-lzmmb\") pod \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207004 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-public-tls-certs\") pod \"8f43d30e-14e4-4978-bb02-a251305f9330\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207079 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-internal-tls-certs\") pod \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207183 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-config-data-custom\") pod \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207269 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-config-data\") pod \"07efd759-c536-425d-938e-a8ccd41706cd\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207337 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-scripts\") pod \"07efd759-c536-425d-938e-a8ccd41706cd\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207412 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"8f43d30e-14e4-4978-bb02-a251305f9330\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207484 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9p6gw\" (UniqueName: \"kubernetes.io/projected/07efd759-c536-425d-938e-a8ccd41706cd-kube-api-access-9p6gw\") pod \"07efd759-c536-425d-938e-a8ccd41706cd\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207557 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-nova-metadata-tls-certs\") pod \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207623 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-config-data\") pod \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207708 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-internal-tls-certs\") pod \"07efd759-c536-425d-938e-a8ccd41706cd\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207785 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45acd92f-2e5d-4fc1-8b91-c91f165e786a-logs\") pod \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207854 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-internal-tls-certs\") pod \"b29bf6bd-079e-4e8b-bec6-49d4923676af\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207920 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-combined-ca-bundle\") pod \"b29bf6bd-079e-4e8b-bec6-49d4923676af\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.207988 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-combined-ca-bundle\") pod \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\" (UID: \"45acd92f-2e5d-4fc1-8b91-c91f165e786a\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.208076 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-scripts\") pod \"8f43d30e-14e4-4978-bb02-a251305f9330\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.208168 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-config-data\") pod \"b29bf6bd-079e-4e8b-bec6-49d4923676af\" (UID: \"b29bf6bd-079e-4e8b-bec6-49d4923676af\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.208253 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-config-data\") pod \"8f43d30e-14e4-4978-bb02-a251305f9330\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.208328 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3486557d-93f8-44c2-b40a-dd8aca19d8e1-logs\") pod \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.208395 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-combined-ca-bundle\") pod \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.208457 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-config-data\") pod \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\" (UID: \"3486557d-93f8-44c2-b40a-dd8aca19d8e1\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.208521 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-combined-ca-bundle\") pod \"8f43d30e-14e4-4978-bb02-a251305f9330\" (UID: \"8f43d30e-14e4-4978-bb02-a251305f9330\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.208681 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07efd759-c536-425d-938e-a8ccd41706cd-logs\") pod \"07efd759-c536-425d-938e-a8ccd41706cd\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.208744 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-config-data-custom\") pod \"07efd759-c536-425d-938e-a8ccd41706cd\" (UID: \"07efd759-c536-425d-938e-a8ccd41706cd\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.210334 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dgdr2\" (UniqueName: \"kubernetes.io/projected/8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07-kube-api-access-dgdr2\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.210425 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f509687a-bb68-4247-b4de-0f0cb99ca389-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.210484 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.210535 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bjtk\" (UniqueName: \"kubernetes.io/projected/f509687a-bb68-4247-b4de-0f0cb99ca389-kube-api-access-8bjtk\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.210591 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e63178b0-da1f-4d9c-b680-9fdddcd51b9a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.210643 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wcphr\" (UniqueName: \"kubernetes.io/projected/e63178b0-da1f-4d9c-b680-9fdddcd51b9a-kube-api-access-wcphr\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206744 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b29bf6bd-079e-4e8b-bec6-49d4923676af-logs" (OuterVolumeSpecName: "logs") pod "b29bf6bd-079e-4e8b-bec6-49d4923676af" (UID: "b29bf6bd-079e-4e8b-bec6-49d4923676af"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.206786 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/07efd759-c536-425d-938e-a8ccd41706cd-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "07efd759-c536-425d-938e-a8ccd41706cd" (UID: "07efd759-c536-425d-938e-a8ccd41706cd"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.210890 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f43d30e-14e4-4978-bb02-a251305f9330-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "8f43d30e-14e4-4978-bb02-a251305f9330" (UID: "8f43d30e-14e4-4978-bb02-a251305f9330"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.212197 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-scripts" (OuterVolumeSpecName: "scripts") pod "b29bf6bd-079e-4e8b-bec6-49d4923676af" (UID: "b29bf6bd-079e-4e8b-bec6-49d4923676af"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.212518 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f43d30e-14e4-4978-bb02-a251305f9330-logs" (OuterVolumeSpecName: "logs") pod "8f43d30e-14e4-4978-bb02-a251305f9330" (UID: "8f43d30e-14e4-4978-bb02-a251305f9330"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.212362 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45acd92f-2e5d-4fc1-8b91-c91f165e786a-logs" (OuterVolumeSpecName: "logs") pod "45acd92f-2e5d-4fc1-8b91-c91f165e786a" (UID: "45acd92f-2e5d-4fc1-8b91-c91f165e786a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.212847 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b29bf6bd-079e-4e8b-bec6-49d4923676af-kube-api-access-vrztf" (OuterVolumeSpecName: "kube-api-access-vrztf") pod "b29bf6bd-079e-4e8b-bec6-49d4923676af" (UID: "b29bf6bd-079e-4e8b-bec6-49d4923676af"). InnerVolumeSpecName "kube-api-access-vrztf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.222970 4910 scope.go:117] "RemoveContainer" containerID="aa48fa221aab1fca8baf355d7d5b238e363506882c0807675e58c0556680cf81" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.224037 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07efd759-c536-425d-938e-a8ccd41706cd-logs" (OuterVolumeSpecName: "logs") pod "07efd759-c536-425d-938e-a8ccd41706cd" (UID: "07efd759-c536-425d-938e-a8ccd41706cd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.224491 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3486557d-93f8-44c2-b40a-dd8aca19d8e1-logs" (OuterVolumeSpecName: "logs") pod "3486557d-93f8-44c2-b40a-dd8aca19d8e1" (UID: "3486557d-93f8-44c2-b40a-dd8aca19d8e1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.229632 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-lhd97" event={"ID":"2210f0ce-43b3-4560-84f8-b56a65414758","Type":"ContainerStarted","Data":"dea86c9f2fc512e486561b6b4a33e3b68be87ab38970e393e203c62d2cfd327c"} Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.232230 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3486557d-93f8-44c2-b40a-dd8aca19d8e1-kube-api-access-9srqw" (OuterVolumeSpecName: "kube-api-access-9srqw") pod "3486557d-93f8-44c2-b40a-dd8aca19d8e1" (UID: "3486557d-93f8-44c2-b40a-dd8aca19d8e1"). InnerVolumeSpecName "kube-api-access-9srqw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.232430 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45acd92f-2e5d-4fc1-8b91-c91f165e786a-kube-api-access-lzmmb" (OuterVolumeSpecName: "kube-api-access-lzmmb") pod "45acd92f-2e5d-4fc1-8b91-c91f165e786a" (UID: "45acd92f-2e5d-4fc1-8b91-c91f165e786a"). InnerVolumeSpecName "kube-api-access-lzmmb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.238733 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-scripts" (OuterVolumeSpecName: "scripts") pod "8f43d30e-14e4-4978-bb02-a251305f9330" (UID: "8f43d30e-14e4-4978-bb02-a251305f9330"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.238858 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-scripts" (OuterVolumeSpecName: "scripts") pod "07efd759-c536-425d-938e-a8ccd41706cd" (UID: "07efd759-c536-425d-938e-a8ccd41706cd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.238956 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "8f43d30e-14e4-4978-bb02-a251305f9330" (UID: "8f43d30e-14e4-4978-bb02-a251305f9330"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.238958 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "45acd92f-2e5d-4fc1-8b91-c91f165e786a" (UID: "45acd92f-2e5d-4fc1-8b91-c91f165e786a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.242244 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07efd759-c536-425d-938e-a8ccd41706cd-kube-api-access-9p6gw" (OuterVolumeSpecName: "kube-api-access-9p6gw") pod "07efd759-c536-425d-938e-a8ccd41706cd" (UID: "07efd759-c536-425d-938e-a8ccd41706cd"). InnerVolumeSpecName "kube-api-access-9p6gw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.242385 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "07efd759-c536-425d-938e-a8ccd41706cd" (UID: "07efd759-c536-425d-938e-a8ccd41706cd"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.269238 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f43d30e-14e4-4978-bb02-a251305f9330-kube-api-access-9jdb2" (OuterVolumeSpecName: "kube-api-access-9jdb2") pod "8f43d30e-14e4-4978-bb02-a251305f9330" (UID: "8f43d30e-14e4-4978-bb02-a251305f9330"). InnerVolumeSpecName "kube-api-access-9jdb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.288770 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"70100901-0709-4900-ac75-462a85b350c3","Type":"ContainerDied","Data":"c242a4f524cd3c368b38d7d0d20bc78c77a89ca828ad4dca71070afad45d6804"} Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.289898 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.297279 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7687b85c5d-l8k6w" event={"ID":"b29bf6bd-079e-4e8b-bec6-49d4923676af","Type":"ContainerDied","Data":"506be26d44cefd5ac4e779619bdc514c3a2aceb203aa2c9a18aa1caf4694818a"} Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.297461 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7687b85c5d-l8k6w" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.303702 4910 generic.go:334] "Generic (PLEG): container finished" podID="cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" containerID="66adaa6dc30ca0eb6df8fdbc29cb135171d3e16efce93331526042109780467b" exitCode=0 Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.303748 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" event={"ID":"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b","Type":"ContainerDied","Data":"66adaa6dc30ca0eb6df8fdbc29cb135171d3e16efce93331526042109780467b"} Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.303771 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" event={"ID":"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b","Type":"ContainerDied","Data":"bd26a1afce2e60c3a75aefb1c31b748fd2c1227ac679f43b7a472a5f1649119f"} Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.303842 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-78b74ccb54-wvrcf" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.305691 4910 generic.go:334] "Generic (PLEG): container finished" podID="ce8ea9ec-e799-457a-aaca-e16b591bdf0c" containerID="dd977da3f8e7fc9fff03a9de2e1898d7cae116843deeda14da1e479c7ce300a4" exitCode=0 Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.305728 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-66897dc6c-9tqxs" event={"ID":"ce8ea9ec-e799-457a-aaca-e16b591bdf0c","Type":"ContainerDied","Data":"dd977da3f8e7fc9fff03a9de2e1898d7cae116843deeda14da1e479c7ce300a4"} Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.308521 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f43d30e-14e4-4978-bb02-a251305f9330","Type":"ContainerDied","Data":"10308827d807bf76a4c1cd30847c997b175f6aad1bb3dcca809249e90cf15140"} Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.308598 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.310973 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3486557d-93f8-44c2-b40a-dd8aca19d8e1","Type":"ContainerDied","Data":"b66c4ba08833e050e588a54e883f9d8d4263532a4489d36589833f105def0349"} Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.311332 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.313566 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-combined-ca-bundle\") pod \"b651f520-1463-434f-b16f-edd2b1b8f8d9\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.313640 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-public-tls-certs\") pod \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.313674 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-config-data-custom\") pod \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.313711 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2nlzn\" (UniqueName: \"kubernetes.io/projected/cf7e2b20-58e5-4c61-9e50-c1af51acf521-kube-api-access-2nlzn\") pod \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.313752 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf7e2b20-58e5-4c61-9e50-c1af51acf521-logs\") pod \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.313815 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-state-metrics-tls-certs\") pod \"b651f520-1463-434f-b16f-edd2b1b8f8d9\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.313845 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-combined-ca-bundle\") pod \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.313869 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-scripts\") pod \"70100901-0709-4900-ac75-462a85b350c3\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.313899 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-config-data\") pod \"70100901-0709-4900-ac75-462a85b350c3\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.313938 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lc6jc\" (UniqueName: \"kubernetes.io/projected/70100901-0709-4900-ac75-462a85b350c3-kube-api-access-lc6jc\") pod 
\"70100901-0709-4900-ac75-462a85b350c3\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.313969 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-combined-ca-bundle\") pod \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.314025 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gn8t9\" (UniqueName: \"kubernetes.io/projected/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-api-access-gn8t9\") pod \"b651f520-1463-434f-b16f-edd2b1b8f8d9\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.314056 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/70100901-0709-4900-ac75-462a85b350c3-httpd-run\") pod \"70100901-0709-4900-ac75-462a85b350c3\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.314093 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-logs\") pod \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.314224 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-config-data\") pod \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.314305 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xssxr\" (UniqueName: \"kubernetes.io/projected/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-kube-api-access-xssxr\") pod \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\" (UID: \"cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.314343 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-state-metrics-tls-config\") pod \"b651f520-1463-434f-b16f-edd2b1b8f8d9\" (UID: \"b651f520-1463-434f-b16f-edd2b1b8f8d9\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.314420 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"70100901-0709-4900-ac75-462a85b350c3\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.314483 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-internal-tls-certs\") pod \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.314620 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-internal-tls-certs\") pod 
\"70100901-0709-4900-ac75-462a85b350c3\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.314652 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70100901-0709-4900-ac75-462a85b350c3-logs\") pod \"70100901-0709-4900-ac75-462a85b350c3\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.314680 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-config-data\") pod \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\" (UID: \"cf7e2b20-58e5-4c61-9e50-c1af51acf521\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.314702 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-combined-ca-bundle\") pod \"70100901-0709-4900-ac75-462a85b350c3\" (UID: \"70100901-0709-4900-ac75-462a85b350c3\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315201 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315220 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrztf\" (UniqueName: \"kubernetes.io/projected/b29bf6bd-079e-4e8b-bec6-49d4923676af-kube-api-access-vrztf\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315233 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f43d30e-14e4-4978-bb02-a251305f9330-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315244 4910 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07efd759-c536-425d-938e-a8ccd41706cd-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315255 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f43d30e-14e4-4978-bb02-a251305f9330-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315266 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9srqw\" (UniqueName: \"kubernetes.io/projected/3486557d-93f8-44c2-b40a-dd8aca19d8e1-kube-api-access-9srqw\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315277 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jdb2\" (UniqueName: \"kubernetes.io/projected/8f43d30e-14e4-4978-bb02-a251305f9330-kube-api-access-9jdb2\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315288 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzmmb\" (UniqueName: \"kubernetes.io/projected/45acd92f-2e5d-4fc1-8b91-c91f165e786a-kube-api-access-lzmmb\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315299 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-config-data-custom\") on node \"crc\" DevicePath \"\"" 
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315310 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-scripts\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315321 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9p6gw\" (UniqueName: \"kubernetes.io/projected/07efd759-c536-425d-938e-a8ccd41706cd-kube-api-access-9p6gw\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315347 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315359 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45acd92f-2e5d-4fc1-8b91-c91f165e786a-logs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315370 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-scripts\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315382 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3486557d-93f8-44c2-b40a-dd8aca19d8e1-logs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315452 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07efd759-c536-425d-938e-a8ccd41706cd-logs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315473 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.315492 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b29bf6bd-079e-4e8b-bec6-49d4923676af-logs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.316490 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-logs" (OuterVolumeSpecName: "logs") pod "cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" (UID: "cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.317143 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf7e2b20-58e5-4c61-9e50-c1af51acf521-logs" (OuterVolumeSpecName: "logs") pod "cf7e2b20-58e5-4c61-9e50-c1af51acf521" (UID: "cf7e2b20-58e5-4c61-9e50-c1af51acf521"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.318479 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70100901-0709-4900-ac75-462a85b350c3-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "70100901-0709-4900-ac75-462a85b350c3" (UID: "70100901-0709-4900-ac75-462a85b350c3"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.318663 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"b651f520-1463-434f-b16f-edd2b1b8f8d9","Type":"ContainerDied","Data":"95f2ff3b9c6c9c1bb55c592f771e9fecc4f1e0ca5bf74d5c87d32243569bc18f"}
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.318824 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.319603 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70100901-0709-4900-ac75-462a85b350c3-logs" (OuterVolumeSpecName: "logs") pod "70100901-0709-4900-ac75-462a85b350c3" (UID: "70100901-0709-4900-ac75-462a85b350c3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.323779 4910 generic.go:334] "Generic (PLEG): container finished" podID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerID="6b6e40e5636a9a405ca504456fe3ac469d0bf34f5f35ee789bc2b3c7fd43ed8f" exitCode=0
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.323813 4910 generic.go:334] "Generic (PLEG): container finished" podID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerID="b20fb279b37814b85e4faff0ba6be368ee421956a3846dd7b5ccb446665cf296" exitCode=0
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.323821 4910 generic.go:334] "Generic (PLEG): container finished" podID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerID="e2d862c3152a2babe7e6e933e033e365153addf3d9f0e0a5bfdf820d3c653e68" exitCode=0
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.323853 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d881977-4280-42f6-8ec5-65be97c8dc28","Type":"ContainerDied","Data":"6b6e40e5636a9a405ca504456fe3ac469d0bf34f5f35ee789bc2b3c7fd43ed8f"}
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.323893 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d881977-4280-42f6-8ec5-65be97c8dc28","Type":"ContainerDied","Data":"b20fb279b37814b85e4faff0ba6be368ee421956a3846dd7b5ccb446665cf296"}
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.323908 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d881977-4280-42f6-8ec5-65be97c8dc28","Type":"ContainerDied","Data":"e2d862c3152a2babe7e6e933e033e365153addf3d9f0e0a5bfdf820d3c653e68"}
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.326649 4910 generic.go:334] "Generic (PLEG): container finished" podID="cf7e2b20-58e5-4c61-9e50-c1af51acf521" containerID="d7aecd1f8fe9c5ffa799f574329b9dee47f4fd0d6129def71457d2e4db819834" exitCode=0
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.326740 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.326745 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cf7e2b20-58e5-4c61-9e50-c1af51acf521","Type":"ContainerDied","Data":"d7aecd1f8fe9c5ffa799f574329b9dee47f4fd0d6129def71457d2e4db819834"}
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.326859 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cf7e2b20-58e5-4c61-9e50-c1af51acf521","Type":"ContainerDied","Data":"f6c7e7a51d1e1d303d4bb88e273d5cb33cbf601c7df231262ca982405886341a"}
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.329100 4910 generic.go:334] "Generic (PLEG): container finished" podID="39608078-4c49-4ca6-b9d4-6cdd37d89f91" containerID="8117e13cdc918455769d99f76275cbebcd1d57825a878291492d6665a99db931" exitCode=0
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.329287 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"39608078-4c49-4ca6-b9d4-6cdd37d89f91","Type":"ContainerDied","Data":"8117e13cdc918455769d99f76275cbebcd1d57825a878291492d6665a99db931"}
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.331481 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" event={"ID":"45acd92f-2e5d-4fc1-8b91-c91f165e786a","Type":"ContainerDied","Data":"ce24c89d86326d45b19f0ac31614562d1b1568b9c252798eff2b4b44a1749993"}
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.331587 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.333434 4910 generic.go:334] "Generic (PLEG): container finished" podID="244c7b09-d3d9-4ae7-864b-ff6758b0de6a" containerID="5f5f3c9fc2640058d48c86ff68f7c8aa4847482f6977cae797955ee8c5bef11c" exitCode=143
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.333489 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" event={"ID":"244c7b09-d3d9-4ae7-864b-ff6758b0de6a","Type":"ContainerDied","Data":"5f5f3c9fc2640058d48c86ff68f7c8aa4847482f6977cae797955ee8c5bef11c"}
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.335351 4910 generic.go:334] "Generic (PLEG): container finished" podID="de8aafdf-9b35-4c41-8726-6c7e86edee5f" containerID="38699171184dfa46b8af02c0e7a8bf314316f1f3e8f7f4d2c59c764a37fae22a" exitCode=0
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.335476 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b0f9-account-create-update-f5pmg"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.335826 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf7e2b20-58e5-4c61-9e50-c1af51acf521-kube-api-access-2nlzn" (OuterVolumeSpecName: "kube-api-access-2nlzn") pod "cf7e2b20-58e5-4c61-9e50-c1af51acf521" (UID: "cf7e2b20-58e5-4c61-9e50-c1af51acf521"). InnerVolumeSpecName "kube-api-access-2nlzn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.335843 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1a42-account-create-update-4vz6m"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.335842 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"de8aafdf-9b35-4c41-8726-6c7e86edee5f","Type":"ContainerDied","Data":"38699171184dfa46b8af02c0e7a8bf314316f1f3e8f7f4d2c59c764a37fae22a"}
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.335875 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-lnk9j"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.335887 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.335920 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b71-account-create-update-85zq8"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.335842 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.336054 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-2c36-account-create-update-777vv"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.338884 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-kube-api-access-xssxr" (OuterVolumeSpecName: "kube-api-access-xssxr") pod "cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" (UID: "cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b"). InnerVolumeSpecName "kube-api-access-xssxr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.338891 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-scripts" (OuterVolumeSpecName: "scripts") pod "70100901-0709-4900-ac75-462a85b350c3" (UID: "70100901-0709-4900-ac75-462a85b350c3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.341300 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "70100901-0709-4900-ac75-462a85b350c3" (UID: "70100901-0709-4900-ac75-462a85b350c3"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.356602 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70100901-0709-4900-ac75-462a85b350c3-kube-api-access-lc6jc" (OuterVolumeSpecName: "kube-api-access-lc6jc") pod "70100901-0709-4900-ac75-462a85b350c3" (UID: "70100901-0709-4900-ac75-462a85b350c3"). InnerVolumeSpecName "kube-api-access-lc6jc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.363580 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" (UID: "cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.374544 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-api-access-gn8t9" (OuterVolumeSpecName: "kube-api-access-gn8t9") pod "b651f520-1463-434f-b16f-edd2b1b8f8d9" (UID: "b651f520-1463-434f-b16f-edd2b1b8f8d9"). InnerVolumeSpecName "kube-api-access-gn8t9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.418091 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-logs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.418317 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xssxr\" (UniqueName: \"kubernetes.io/projected/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-kube-api-access-xssxr\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.418399 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.418459 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70100901-0709-4900-ac75-462a85b350c3-logs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.418547 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.418602 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2nlzn\" (UniqueName: \"kubernetes.io/projected/cf7e2b20-58e5-4c61-9e50-c1af51acf521-kube-api-access-2nlzn\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.418652 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf7e2b20-58e5-4c61-9e50-c1af51acf521-logs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.418717 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-scripts\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.418773 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lc6jc\" (UniqueName: \"kubernetes.io/projected/70100901-0709-4900-ac75-462a85b350c3-kube-api-access-lc6jc\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.418831 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gn8t9\" (UniqueName: \"kubernetes.io/projected/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-api-access-gn8t9\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.418887 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/70100901-0709-4900-ac75-462a85b350c3-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.492602 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-config-data" (OuterVolumeSpecName: "config-data") pod "3486557d-93f8-44c2-b40a-dd8aca19d8e1" (UID: "3486557d-93f8-44c2-b40a-dd8aca19d8e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.521162 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.525414 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da0001c8-7a9c-47a8-b901-03066bf6a7ff-operator-scripts\") pod \"keystone-5b71-account-create-update-85zq8\" (UID: \"da0001c8-7a9c-47a8-b901-03066bf6a7ff\") " pod="openstack/keystone-5b71-account-create-update-85zq8"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.525723 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hg6f7\" (UniqueName: \"kubernetes.io/projected/da0001c8-7a9c-47a8-b901-03066bf6a7ff-kube-api-access-hg6f7\") pod \"keystone-5b71-account-create-update-85zq8\" (UID: \"da0001c8-7a9c-47a8-b901-03066bf6a7ff\") " pod="openstack/keystone-5b71-account-create-update-85zq8"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.526319 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.526349 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: E0105 22:15:27.526782 4910 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Jan 05 22:15:27 crc kubenswrapper[4910]: E0105 22:15:27.526844 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/da0001c8-7a9c-47a8-b901-03066bf6a7ff-operator-scripts podName:da0001c8-7a9c-47a8-b901-03066bf6a7ff nodeName:}" failed. No retries permitted until 2026-01-05 22:15:29.526825986 +0000 UTC m=+1461.104323656 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/da0001c8-7a9c-47a8-b901-03066bf6a7ff-operator-scripts") pod "keystone-5b71-account-create-update-85zq8" (UID: "da0001c8-7a9c-47a8-b901-03066bf6a7ff") : configmap "openstack-scripts" not found
Jan 05 22:15:27 crc kubenswrapper[4910]: E0105 22:15:27.530142 4910 projected.go:194] Error preparing data for projected volume kube-api-access-hg6f7 for pod openstack/keystone-5b71-account-create-update-85zq8: failed to fetch token: serviceaccounts "galera-openstack" not found
Jan 05 22:15:27 crc kubenswrapper[4910]: E0105 22:15:27.530236 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/da0001c8-7a9c-47a8-b901-03066bf6a7ff-kube-api-access-hg6f7 podName:da0001c8-7a9c-47a8-b901-03066bf6a7ff nodeName:}" failed. No retries permitted until 2026-01-05 22:15:29.53021166 +0000 UTC m=+1461.107709330 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-hg6f7" (UniqueName: "kubernetes.io/projected/da0001c8-7a9c-47a8-b901-03066bf6a7ff-kube-api-access-hg6f7") pod "keystone-5b71-account-create-update-85zq8" (UID: "da0001c8-7a9c-47a8-b901-03066bf6a7ff") : failed to fetch token: serviceaccounts "galera-openstack" not found
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.555406 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8f43d30e-14e4-4978-bb02-a251305f9330" (UID: "8f43d30e-14e4-4978-bb02-a251305f9330"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.557310 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "45acd92f-2e5d-4fc1-8b91-c91f165e786a" (UID: "45acd92f-2e5d-4fc1-8b91-c91f165e786a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.568495 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "3486557d-93f8-44c2-b40a-dd8aca19d8e1" (UID: "3486557d-93f8-44c2-b40a-dd8aca19d8e1"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.579567 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b651f520-1463-434f-b16f-edd2b1b8f8d9" (UID: "b651f520-1463-434f-b16f-edd2b1b8f8d9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.593036 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" (UID: "cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.593341 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.593450 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "b651f520-1463-434f-b16f-edd2b1b8f8d9" (UID: "b651f520-1463-434f-b16f-edd2b1b8f8d9"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.604656 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "07efd759-c536-425d-938e-a8ccd41706cd" (UID: "07efd759-c536-425d-938e-a8ccd41706cd"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.605230 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "07efd759-c536-425d-938e-a8ccd41706cd" (UID: "07efd759-c536-425d-938e-a8ccd41706cd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.606393 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b29bf6bd-079e-4e8b-bec6-49d4923676af" (UID: "b29bf6bd-079e-4e8b-bec6-49d4923676af"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.629702 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-config-data" (OuterVolumeSpecName: "config-data") pod "07efd759-c536-425d-938e-a8ccd41706cd" (UID: "07efd759-c536-425d-938e-a8ccd41706cd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.629827 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8f43d30e-14e4-4978-bb02-a251305f9330" (UID: "8f43d30e-14e4-4978-bb02-a251305f9330"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.631823 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.632391 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.632508 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.632538 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.632556 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.632572 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.632592 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.632610 4910 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.632626 4910 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.632641 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.632653 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.632665 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.633809 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-config-data" (OuterVolumeSpecName: "config-data") pod "70100901-0709-4900-ac75-462a85b350c3" (UID: "70100901-0709-4900-ac75-462a85b350c3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.649949 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "70100901-0709-4900-ac75-462a85b350c3" (UID: "70100901-0709-4900-ac75-462a85b350c3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.650726 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3486557d-93f8-44c2-b40a-dd8aca19d8e1" (UID: "3486557d-93f8-44c2-b40a-dd8aca19d8e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.650806 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-config-data" (OuterVolumeSpecName: "config-data") pod "b29bf6bd-079e-4e8b-bec6-49d4923676af" (UID: "b29bf6bd-079e-4e8b-bec6-49d4923676af"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.654537 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-config-data" (OuterVolumeSpecName: "config-data") pod "cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" (UID: "cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.682064 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "45acd92f-2e5d-4fc1-8b91-c91f165e786a" (UID: "45acd92f-2e5d-4fc1-8b91-c91f165e786a"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.682080 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-config-data" (OuterVolumeSpecName: "config-data") pod "45acd92f-2e5d-4fc1-8b91-c91f165e786a" (UID: "45acd92f-2e5d-4fc1-8b91-c91f165e786a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.682696 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "70100901-0709-4900-ac75-462a85b350c3" (UID: "70100901-0709-4900-ac75-462a85b350c3"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.683084 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-config-data" (OuterVolumeSpecName: "config-data") pod "cf7e2b20-58e5-4c61-9e50-c1af51acf521" (UID: "cf7e2b20-58e5-4c61-9e50-c1af51acf521"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.683398 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "45acd92f-2e5d-4fc1-8b91-c91f165e786a" (UID: "45acd92f-2e5d-4fc1-8b91-c91f165e786a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.684881 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cf7e2b20-58e5-4c61-9e50-c1af51acf521" (UID: "cf7e2b20-58e5-4c61-9e50-c1af51acf521"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.691857 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "cf7e2b20-58e5-4c61-9e50-c1af51acf521" (UID: "cf7e2b20-58e5-4c61-9e50-c1af51acf521"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.700343 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b29bf6bd-079e-4e8b-bec6-49d4923676af" (UID: "b29bf6bd-079e-4e8b-bec6-49d4923676af"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.712484 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-config-data" (OuterVolumeSpecName: "config-data") pod "8f43d30e-14e4-4978-bb02-a251305f9330" (UID: "8f43d30e-14e4-4978-bb02-a251305f9330"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.712624 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-867cd545c7-pd68r" podUID="f55a0cf4-44d3-4896-911b-430d13f1f67e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.196:5353: i/o timeout"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.730029 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "b651f520-1463-434f-b16f-edd2b1b8f8d9" (UID: "b651f520-1463-434f-b16f-edd2b1b8f8d9"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.730430 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cf7e2b20-58e5-4c61-9e50-c1af51acf521" (UID: "cf7e2b20-58e5-4c61-9e50-c1af51acf521"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734479 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734512 4910 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/b651f520-1463-434f-b16f-edd2b1b8f8d9-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734525 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734556 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734568 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734580 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734591 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734602 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734613 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45acd92f-2e5d-4fc1-8b91-c91f165e786a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734623 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734634 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f43d30e-14e4-4978-bb02-a251305f9330-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734646 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3486557d-93f8-44c2-b40a-dd8aca19d8e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734657 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734668 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734679 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70100901-0709-4900-ac75-462a85b350c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.734690 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf7e2b20-58e5-4c61-9e50-c1af51acf521-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.735046 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "07efd759-c536-425d-938e-a8ccd41706cd" (UID: "07efd759-c536-425d-938e-a8ccd41706cd"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.758897 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b29bf6bd-079e-4e8b-bec6-49d4923676af" (UID: "b29bf6bd-079e-4e8b-bec6-49d4923676af"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.836306 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07efd759-c536-425d-938e-a8ccd41706cd-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.836344 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b29bf6bd-079e-4e8b-bec6-49d4923676af-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.836970 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b71-account-create-update-85zq8"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.837602 4910 scope.go:117] "RemoveContainer" containerID="1e10056784aaab7edb53371b1e8ee1b1dfc4d02346c220a12403d46024abfaa4"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.882249 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.907585 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-66897dc6c-9tqxs"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.911399 4910 scope.go:117] "RemoveContainer" containerID="78b733a8056419d98b27c49e64b19c3144941beb236873f5de3f41a43f0fe70b"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.913779 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.930821 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 05 22:15:27 crc kubenswrapper[4910]: E0105 22:15:27.935874 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="03e2a0482d96bb74144b1ebf3502bf0c9e701db7ab42a851ca5abd53fadbfdf7" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.936916 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/39608078-4c49-4ca6-b9d4-6cdd37d89f91-kolla-config\") pod \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.936942 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-config-data\") pod \"3d881977-4280-42f6-8ec5-65be97c8dc28\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.936960 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-combined-ca-bundle\") pod \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.936980 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-config-data\") pod \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937004 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39608078-4c49-4ca6-b9d4-6cdd37d89f91-combined-ca-bundle\") pod \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937026 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-sg-core-conf-yaml\") pod \"3d881977-4280-42f6-8ec5-65be97c8dc28\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937046 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39608078-4c49-4ca6-b9d4-6cdd37d89f91-config-data\") pod \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937063 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d881977-4280-42f6-8ec5-65be97c8dc28-log-httpd\") pod \"3d881977-4280-42f6-8ec5-65be97c8dc28\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937086 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9m2j8\" (UniqueName: \"kubernetes.io/projected/39608078-4c49-4ca6-b9d4-6cdd37d89f91-kube-api-access-9m2j8\") pod \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937109 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de8aafdf-9b35-4c41-8726-6c7e86edee5f-config-data\") pod \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\" (UID: \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937154 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-config-data-custom\") pod \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937178 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d881977-4280-42f6-8ec5-65be97c8dc28-run-httpd\") pod \"3d881977-4280-42f6-8ec5-65be97c8dc28\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937201 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-logs\") pod \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937223 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/39608078-4c49-4ca6-b9d4-6cdd37d89f91-memcached-tls-certs\") pod \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\" (UID: \"39608078-4c49-4ca6-b9d4-6cdd37d89f91\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937285 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drhf4\" (UniqueName: \"kubernetes.io/projected/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-kube-api-access-drhf4\") pod \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\" (UID: \"ce8ea9ec-e799-457a-aaca-e16b591bdf0c\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937318 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mxxs2\" (UniqueName: \"kubernetes.io/projected/3d881977-4280-42f6-8ec5-65be97c8dc28-kube-api-access-mxxs2\") pod \"3d881977-4280-42f6-8ec5-65be97c8dc28\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937333 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-scripts\") pod \"3d881977-4280-42f6-8ec5-65be97c8dc28\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") "
Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937354 4910 reconciler_common.go:159]
"operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-combined-ca-bundle\") pod \"3d881977-4280-42f6-8ec5-65be97c8dc28\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937370 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tttg6\" (UniqueName: \"kubernetes.io/projected/de8aafdf-9b35-4c41-8726-6c7e86edee5f-kube-api-access-tttg6\") pod \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\" (UID: \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937406 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8aafdf-9b35-4c41-8726-6c7e86edee5f-combined-ca-bundle\") pod \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\" (UID: \"de8aafdf-9b35-4c41-8726-6c7e86edee5f\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.937422 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-ceilometer-tls-certs\") pod \"3d881977-4280-42f6-8ec5-65be97c8dc28\" (UID: \"3d881977-4280-42f6-8ec5-65be97c8dc28\") " Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.942579 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39608078-4c49-4ca6-b9d4-6cdd37d89f91-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "39608078-4c49-4ca6-b9d4-6cdd37d89f91" (UID: "39608078-4c49-4ca6-b9d4-6cdd37d89f91"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: E0105 22:15:27.942728 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="03e2a0482d96bb74144b1ebf3502bf0c9e701db7ab42a851ca5abd53fadbfdf7" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.946891 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-logs" (OuterVolumeSpecName: "logs") pod "ce8ea9ec-e799-457a-aaca-e16b591bdf0c" (UID: "ce8ea9ec-e799-457a-aaca-e16b591bdf0c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.947877 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d881977-4280-42f6-8ec5-65be97c8dc28-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3d881977-4280-42f6-8ec5-65be97c8dc28" (UID: "3d881977-4280-42f6-8ec5-65be97c8dc28"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.949065 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39608078-4c49-4ca6-b9d4-6cdd37d89f91-config-data" (OuterVolumeSpecName: "config-data") pod "39608078-4c49-4ca6-b9d4-6cdd37d89f91" (UID: "39608078-4c49-4ca6-b9d4-6cdd37d89f91"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.955278 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d881977-4280-42f6-8ec5-65be97c8dc28-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3d881977-4280-42f6-8ec5-65be97c8dc28" (UID: "3d881977-4280-42f6-8ec5-65be97c8dc28"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: E0105 22:15:27.956637 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="03e2a0482d96bb74144b1ebf3502bf0c9e701db7ab42a851ca5abd53fadbfdf7" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 22:15:27 crc kubenswrapper[4910]: E0105 22:15:27.956707 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="83319bb4-7278-49b3-8ef2-beb8baa0a1a6" containerName="nova-scheduler-scheduler" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.963823 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-lnk9j"] Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.967924 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ce8ea9ec-e799-457a-aaca-e16b591bdf0c" (UID: "ce8ea9ec-e799-457a-aaca-e16b591bdf0c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.969451 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39608078-4c49-4ca6-b9d4-6cdd37d89f91-kube-api-access-9m2j8" (OuterVolumeSpecName: "kube-api-access-9m2j8") pod "39608078-4c49-4ca6-b9d4-6cdd37d89f91" (UID: "39608078-4c49-4ca6-b9d4-6cdd37d89f91"). InnerVolumeSpecName "kube-api-access-9m2j8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.971239 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-scripts" (OuterVolumeSpecName: "scripts") pod "3d881977-4280-42f6-8ec5-65be97c8dc28" (UID: "3d881977-4280-42f6-8ec5-65be97c8dc28"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.976703 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d881977-4280-42f6-8ec5-65be97c8dc28-kube-api-access-mxxs2" (OuterVolumeSpecName: "kube-api-access-mxxs2") pod "3d881977-4280-42f6-8ec5-65be97c8dc28" (UID: "3d881977-4280-42f6-8ec5-65be97c8dc28"). InnerVolumeSpecName "kube-api-access-mxxs2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.979458 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-lnk9j"] Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.987259 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-kube-api-access-drhf4" (OuterVolumeSpecName: "kube-api-access-drhf4") pod "ce8ea9ec-e799-457a-aaca-e16b591bdf0c" (UID: "ce8ea9ec-e799-457a-aaca-e16b591bdf0c"). InnerVolumeSpecName "kube-api-access-drhf4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:27 crc kubenswrapper[4910]: I0105 22:15:27.996447 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.014635 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.015399 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de8aafdf-9b35-4c41-8726-6c7e86edee5f-kube-api-access-tttg6" (OuterVolumeSpecName: "kube-api-access-tttg6") pod "de8aafdf-9b35-4c41-8726-6c7e86edee5f" (UID: "de8aafdf-9b35-4c41-8726-6c7e86edee5f"). InnerVolumeSpecName "kube-api-access-tttg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.017452 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39608078-4c49-4ca6-b9d4-6cdd37d89f91-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "39608078-4c49-4ca6-b9d4-6cdd37d89f91" (UID: "39608078-4c49-4ca6-b9d4-6cdd37d89f91"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.033401 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ce8ea9ec-e799-457a-aaca-e16b591bdf0c" (UID: "ce8ea9ec-e799-457a-aaca-e16b591bdf0c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.038736 4910 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/39608078-4c49-4ca6-b9d4-6cdd37d89f91-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.038767 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.038778 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39608078-4c49-4ca6-b9d4-6cdd37d89f91-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.038789 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39608078-4c49-4ca6-b9d4-6cdd37d89f91-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.038800 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d881977-4280-42f6-8ec5-65be97c8dc28-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.038813 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9m2j8\" (UniqueName: \"kubernetes.io/projected/39608078-4c49-4ca6-b9d4-6cdd37d89f91-kube-api-access-9m2j8\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.038825 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.038835 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d881977-4280-42f6-8ec5-65be97c8dc28-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.038846 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.038858 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drhf4\" (UniqueName: \"kubernetes.io/projected/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-kube-api-access-drhf4\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.038869 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.038879 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mxxs2\" (UniqueName: \"kubernetes.io/projected/3d881977-4280-42f6-8ec5-65be97c8dc28-kube-api-access-mxxs2\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.038891 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tttg6\" (UniqueName: \"kubernetes.io/projected/de8aafdf-9b35-4c41-8726-6c7e86edee5f-kube-api-access-tttg6\") on node \"crc\" DevicePath \"\"" Jan 05 
22:15:28 crc kubenswrapper[4910]: E0105 22:15:28.038963 4910 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 05 22:15:28 crc kubenswrapper[4910]: E0105 22:15:28.039015 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data podName:7e2a3efd-2de7-493e-af91-900b224e5313 nodeName:}" failed. No retries permitted until 2026-01-05 22:15:36.038998278 +0000 UTC m=+1467.616495958 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data") pod "rabbitmq-server-0" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313") : configmap "rabbitmq-config-data" not found Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.040764 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-b0f9-account-create-update-f5pmg"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.052405 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3d881977-4280-42f6-8ec5-65be97c8dc28" (UID: "3d881977-4280-42f6-8ec5-65be97c8dc28"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.052491 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-b0f9-account-create-update-f5pmg"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.085487 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-1a42-account-create-update-4vz6m"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.134774 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-1a42-account-create-update-4vz6m"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.140854 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.147608 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-config-data" (OuterVolumeSpecName: "config-data") pod "ce8ea9ec-e799-457a-aaca-e16b591bdf0c" (UID: "ce8ea9ec-e799-457a-aaca-e16b591bdf0c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.150805 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de8aafdf-9b35-4c41-8726-6c7e86edee5f-config-data" (OuterVolumeSpecName: "config-data") pod "de8aafdf-9b35-4c41-8726-6c7e86edee5f" (UID: "de8aafdf-9b35-4c41-8726-6c7e86edee5f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.153422 4910 scope.go:117] "RemoveContainer" containerID="008ff3c44ce49caf6caea7aa9f55cfc608a8d5e702630f035b8953f4de51ddc1" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.159348 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de8aafdf-9b35-4c41-8726-6c7e86edee5f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de8aafdf-9b35-4c41-8726-6c7e86edee5f" (UID: "de8aafdf-9b35-4c41-8726-6c7e86edee5f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.176909 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "3d881977-4280-42f6-8ec5-65be97c8dc28" (UID: "3d881977-4280-42f6-8ec5-65be97c8dc28"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.210629 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-lhd97" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.214820 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-2c36-account-create-update-777vv"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.245367 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39608078-4c49-4ca6-b9d4-6cdd37d89f91-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "39608078-4c49-4ca6-b9d4-6cdd37d89f91" (UID: "39608078-4c49-4ca6-b9d4-6cdd37d89f91"). InnerVolumeSpecName "memcached-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.246684 4910 scope.go:117] "RemoveContainer" containerID="5f896af4ce5feef15b4dba2b2abb97a685fc637f2ec21e921db5a1f857688437" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.250136 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8aafdf-9b35-4c41-8726-6c7e86edee5f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.250227 4910 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.250306 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8ea9ec-e799-457a-aaca-e16b591bdf0c-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.250367 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de8aafdf-9b35-4c41-8726-6c7e86edee5f-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.250443 4910 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/39608078-4c49-4ca6-b9d4-6cdd37d89f91-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.250258 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3d881977-4280-42f6-8ec5-65be97c8dc28" (UID: "3d881977-4280-42f6-8ec5-65be97c8dc28"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.250342 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-config-data" (OuterVolumeSpecName: "config-data") pod "3d881977-4280-42f6-8ec5-65be97c8dc28" (UID: "3d881977-4280-42f6-8ec5-65be97c8dc28"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.304899 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-2c36-account-create-update-777vv"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.354392 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2210f0ce-43b3-4560-84f8-b56a65414758-operator-scripts\") pod \"2210f0ce-43b3-4560-84f8-b56a65414758\" (UID: \"2210f0ce-43b3-4560-84f8-b56a65414758\") " Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.357811 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4txxg\" (UniqueName: \"kubernetes.io/projected/2210f0ce-43b3-4560-84f8-b56a65414758-kube-api-access-4txxg\") pod \"2210f0ce-43b3-4560-84f8-b56a65414758\" (UID: \"2210f0ce-43b3-4560-84f8-b56a65414758\") " Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.364831 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.364864 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d881977-4280-42f6-8ec5-65be97c8dc28-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.355766 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2210f0ce-43b3-4560-84f8-b56a65414758-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2210f0ce-43b3-4560-84f8-b56a65414758" (UID: "2210f0ce-43b3-4560-84f8-b56a65414758"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.365942 4910 scope.go:117] "RemoveContainer" containerID="66adaa6dc30ca0eb6df8fdbc29cb135171d3e16efce93331526042109780467b" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.368230 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"de8aafdf-9b35-4c41-8726-6c7e86edee5f","Type":"ContainerDied","Data":"4a96f0146a60e825d8c8659118268e18b77004d2aec84b07cf56c74d90452388"} Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.368274 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.372742 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2210f0ce-43b3-4560-84f8-b56a65414758-kube-api-access-4txxg" (OuterVolumeSpecName: "kube-api-access-4txxg") pod "2210f0ce-43b3-4560-84f8-b56a65414758" (UID: "2210f0ce-43b3-4560-84f8-b56a65414758"). InnerVolumeSpecName "kube-api-access-4txxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.377166 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"39608078-4c49-4ca6-b9d4-6cdd37d89f91","Type":"ContainerDied","Data":"3c88da6b0c96eda18ce1a771f4bf3b99935aa4de65bb73c2512ac94c7c0248fc"} Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.377178 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.377715 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-7687b85c5d-l8k6w"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.386809 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-lhd97" event={"ID":"2210f0ce-43b3-4560-84f8-b56a65414758","Type":"ContainerDied","Data":"dea86c9f2fc512e486561b6b4a33e3b68be87ab38970e393e203c62d2cfd327c"} Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.386969 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-lhd97" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.397953 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-7687b85c5d-l8k6w"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.432339 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d881977-4280-42f6-8ec5-65be97c8dc28","Type":"ContainerDied","Data":"8cc82c8ba918ec8c625a3be0674e6dbbdff1bcd246160f5bcea2d78987f50edb"} Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.432536 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.452767 4910 scope.go:117] "RemoveContainer" containerID="71aa4a23693de64aaa8cbbd15881cbffefc0342e211e96812e957ba634cedcdb" Jan 05 22:15:28 crc kubenswrapper[4910]: E0105 22:15:28.455599 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 05 22:15:28 crc kubenswrapper[4910]: E0105 22:15:28.455690 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.457567 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b71-account-create-update-85zq8" Jan 05 22:15:28 crc kubenswrapper[4910]: E0105 22:15:28.458180 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.458231 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-66897dc6c-9tqxs" event={"ID":"ce8ea9ec-e799-457a-aaca-e16b591bdf0c","Type":"ContainerDied","Data":"6dae96ce47f7edad278dc6b7a0b2d411166a07289e3cd626972b54862f560b7c"} Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.458317 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-66897dc6c-9tqxs" Jan 05 22:15:28 crc kubenswrapper[4910]: E0105 22:15:28.459049 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 05 22:15:28 crc kubenswrapper[4910]: E0105 22:15:28.459109 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovsdb-server" Jan 05 22:15:28 crc kubenswrapper[4910]: E0105 22:15:28.463038 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.466627 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2210f0ce-43b3-4560-84f8-b56a65414758-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.466663 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4txxg\" (UniqueName: \"kubernetes.io/projected/2210f0ce-43b3-4560-84f8-b56a65414758-kube-api-access-4txxg\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.467695 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-78b74ccb54-wvrcf"] Jan 05 22:15:28 crc kubenswrapper[4910]: E0105 22:15:28.467756 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 05 22:15:28 crc kubenswrapper[4910]: E0105 22:15:28.468058 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovs-vswitchd" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.482164 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-78b74ccb54-wvrcf"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.493195 4910 scope.go:117] "RemoveContainer" containerID="66adaa6dc30ca0eb6df8fdbc29cb135171d3e16efce93331526042109780467b" Jan 05 22:15:28 crc kubenswrapper[4910]: E0105 22:15:28.496445 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66adaa6dc30ca0eb6df8fdbc29cb135171d3e16efce93331526042109780467b\": container with ID starting 
with 66adaa6dc30ca0eb6df8fdbc29cb135171d3e16efce93331526042109780467b not found: ID does not exist" containerID="66adaa6dc30ca0eb6df8fdbc29cb135171d3e16efce93331526042109780467b" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.496497 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66adaa6dc30ca0eb6df8fdbc29cb135171d3e16efce93331526042109780467b"} err="failed to get container status \"66adaa6dc30ca0eb6df8fdbc29cb135171d3e16efce93331526042109780467b\": rpc error: code = NotFound desc = could not find container \"66adaa6dc30ca0eb6df8fdbc29cb135171d3e16efce93331526042109780467b\": container with ID starting with 66adaa6dc30ca0eb6df8fdbc29cb135171d3e16efce93331526042109780467b not found: ID does not exist" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.496523 4910 scope.go:117] "RemoveContainer" containerID="71aa4a23693de64aaa8cbbd15881cbffefc0342e211e96812e957ba634cedcdb" Jan 05 22:15:28 crc kubenswrapper[4910]: E0105 22:15:28.496918 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71aa4a23693de64aaa8cbbd15881cbffefc0342e211e96812e957ba634cedcdb\": container with ID starting with 71aa4a23693de64aaa8cbbd15881cbffefc0342e211e96812e957ba634cedcdb not found: ID does not exist" containerID="71aa4a23693de64aaa8cbbd15881cbffefc0342e211e96812e957ba634cedcdb" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.496947 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71aa4a23693de64aaa8cbbd15881cbffefc0342e211e96812e957ba634cedcdb"} err="failed to get container status \"71aa4a23693de64aaa8cbbd15881cbffefc0342e211e96812e957ba634cedcdb\": rpc error: code = NotFound desc = could not find container \"71aa4a23693de64aaa8cbbd15881cbffefc0342e211e96812e957ba634cedcdb\": container with ID starting with 71aa4a23693de64aaa8cbbd15881cbffefc0342e211e96812e957ba634cedcdb not found: ID does not exist" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.496964 4910 scope.go:117] "RemoveContainer" containerID="e302bde0bc25b21936e7ca65ca2849db5acaa0ddf0792ac1f5ffccee28c53746" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.526051 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.526112 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.526144 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.535189 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.553094 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.560346 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.565320 4910 scope.go:117] "RemoveContainer" containerID="108372c447325380382fbbd2e70aa8ef323e8b23d29f6e32887e63496ac39324" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.587980 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.591233 4910 kubelet.go:2431] "SyncLoop 
REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.614640 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6bbbdf8dc6-s6tmf"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.624173 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-6bbbdf8dc6-s6tmf"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.632476 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.643811 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.643871 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.658287 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.687755 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.703304 4910 scope.go:117] "RemoveContainer" containerID="9f4f5a94d78ccf55b8b88bf158362b3d9f7fee1d51111812e72271a6887b1360" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.703464 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.778373 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04ef3843-8448-4842-aaf3-7e2bcc428122" path="/var/lib/kubelet/pods/04ef3843-8448-4842-aaf3-7e2bcc428122/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.778795 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07efd759-c536-425d-938e-a8ccd41706cd" path="/var/lib/kubelet/pods/07efd759-c536-425d-938e-a8ccd41706cd/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.779526 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19d63cd6-26c3-439b-a9f6-5a53f27d9e0e" path="/var/lib/kubelet/pods/19d63cd6-26c3-439b-a9f6-5a53f27d9e0e/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.783075 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24f2eef4-3eac-4643-bffa-0747afae172a" path="/var/lib/kubelet/pods/24f2eef4-3eac-4643-bffa-0747afae172a/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.783726 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" path="/var/lib/kubelet/pods/3486557d-93f8-44c2-b40a-dd8aca19d8e1/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.784349 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69" path="/var/lib/kubelet/pods/3c4c041e-ee03-4ba1-b6b5-c32ed7cb3b69/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.785017 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45acd92f-2e5d-4fc1-8b91-c91f165e786a" path="/var/lib/kubelet/pods/45acd92f-2e5d-4fc1-8b91-c91f165e786a/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.786182 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70100901-0709-4900-ac75-462a85b350c3" path="/var/lib/kubelet/pods/70100901-0709-4900-ac75-462a85b350c3/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 
22:15:28.786885 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70694d65-fa64-4667-b1aa-bac50650687c" path="/var/lib/kubelet/pods/70694d65-fa64-4667-b1aa-bac50650687c/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.788027 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07" path="/var/lib/kubelet/pods/8db5b2ca-0224-4f13-b7d6-be9fb7aa7e07/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.788460 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f43d30e-14e4-4978-bb02-a251305f9330" path="/var/lib/kubelet/pods/8f43d30e-14e4-4978-bb02-a251305f9330/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.789055 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b29bf6bd-079e-4e8b-bec6-49d4923676af" path="/var/lib/kubelet/pods/b29bf6bd-079e-4e8b-bec6-49d4923676af/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.789805 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b651f520-1463-434f-b16f-edd2b1b8f8d9" path="/var/lib/kubelet/pods/b651f520-1463-434f-b16f-edd2b1b8f8d9/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.790987 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" path="/var/lib/kubelet/pods/cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.791626 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf7e2b20-58e5-4c61-9e50-c1af51acf521" path="/var/lib/kubelet/pods/cf7e2b20-58e5-4c61-9e50-c1af51acf521/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.792762 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da2a33ae-86a0-465d-a05e-89007e39e580" path="/var/lib/kubelet/pods/da2a33ae-86a0-465d-a05e-89007e39e580/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.793255 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de8aafdf-9b35-4c41-8726-6c7e86edee5f" path="/var/lib/kubelet/pods/de8aafdf-9b35-4c41-8726-6c7e86edee5f/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.793818 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e63178b0-da1f-4d9c-b680-9fdddcd51b9a" path="/var/lib/kubelet/pods/e63178b0-da1f-4d9c-b680-9fdddcd51b9a/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.794493 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f509687a-bb68-4247-b4de-0f0cb99ca389" path="/var/lib/kubelet/pods/f509687a-bb68-4247-b4de-0f0cb99ca389/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.795398 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9587597-0dcc-4c3a-b578-f9797dd2f9c1" path="/var/lib/kubelet/pods/f9587597-0dcc-4c3a-b578-f9797dd2f9c1/volumes" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.796234 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.796267 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.796287 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.796299 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Jan 05 
22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.796309 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-66897dc6c-9tqxs"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.796321 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-66897dc6c-9tqxs"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.803661 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5b71-account-create-update-85zq8"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.809228 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-5b71-account-create-update-85zq8"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.809297 4910 scope.go:117] "RemoveContainer" containerID="a8b2b4d5b559dae71be16c91b6e3ceb8f53c013e2ed93dca2aa9f32d74982c10" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.828684 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-lhd97"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.831088 4910 scope.go:117] "RemoveContainer" containerID="7eb793854dd2ca885d20aea4858a9baa5dd3b5bf64d04ad614881d8b63b82097" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.833132 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-lhd97"] Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.856361 4910 scope.go:117] "RemoveContainer" containerID="d7aecd1f8fe9c5ffa799f574329b9dee47f4fd0d6129def71457d2e4db819834" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.887369 4910 scope.go:117] "RemoveContainer" containerID="b5b23f1d39fd87015c972670711fa8663521d44165624ef12201ef9f0c36a505" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.988644 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da0001c8-7a9c-47a8-b901-03066bf6a7ff-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.989781 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hg6f7\" (UniqueName: \"kubernetes.io/projected/da0001c8-7a9c-47a8-b901-03066bf6a7ff-kube-api-access-hg6f7\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.999013 4910 scope.go:117] "RemoveContainer" containerID="d7aecd1f8fe9c5ffa799f574329b9dee47f4fd0d6129def71457d2e4db819834" Jan 05 22:15:28 crc kubenswrapper[4910]: E0105 22:15:28.999680 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7aecd1f8fe9c5ffa799f574329b9dee47f4fd0d6129def71457d2e4db819834\": container with ID starting with d7aecd1f8fe9c5ffa799f574329b9dee47f4fd0d6129def71457d2e4db819834 not found: ID does not exist" containerID="d7aecd1f8fe9c5ffa799f574329b9dee47f4fd0d6129def71457d2e4db819834" Jan 05 22:15:28 crc kubenswrapper[4910]: I0105 22:15:28.999728 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7aecd1f8fe9c5ffa799f574329b9dee47f4fd0d6129def71457d2e4db819834"} err="failed to get container status \"d7aecd1f8fe9c5ffa799f574329b9dee47f4fd0d6129def71457d2e4db819834\": rpc error: code = NotFound desc = could not find container \"d7aecd1f8fe9c5ffa799f574329b9dee47f4fd0d6129def71457d2e4db819834\": container with ID starting with d7aecd1f8fe9c5ffa799f574329b9dee47f4fd0d6129def71457d2e4db819834 not found: ID does not exist" Jan 05 22:15:28 crc 
kubenswrapper[4910]: I0105 22:15:28.999762 4910 scope.go:117] "RemoveContainer" containerID="b5b23f1d39fd87015c972670711fa8663521d44165624ef12201ef9f0c36a505" Jan 05 22:15:29 crc kubenswrapper[4910]: E0105 22:15:29.000547 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5b23f1d39fd87015c972670711fa8663521d44165624ef12201ef9f0c36a505\": container with ID starting with b5b23f1d39fd87015c972670711fa8663521d44165624ef12201ef9f0c36a505 not found: ID does not exist" containerID="b5b23f1d39fd87015c972670711fa8663521d44165624ef12201ef9f0c36a505" Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.000583 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5b23f1d39fd87015c972670711fa8663521d44165624ef12201ef9f0c36a505"} err="failed to get container status \"b5b23f1d39fd87015c972670711fa8663521d44165624ef12201ef9f0c36a505\": rpc error: code = NotFound desc = could not find container \"b5b23f1d39fd87015c972670711fa8663521d44165624ef12201ef9f0c36a505\": container with ID starting with b5b23f1d39fd87015c972670711fa8663521d44165624ef12201ef9f0c36a505 not found: ID does not exist" Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.000610 4910 scope.go:117] "RemoveContainer" containerID="a005751f16bf05306ffd138b7900c870797084700111340ccf797cab547f6f2e" Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.038542 4910 scope.go:117] "RemoveContainer" containerID="6104667b5ae1cdcd47a597709123b12716141db09f9b433cb838f5a9fceaa70c" Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.143575 4910 scope.go:117] "RemoveContainer" containerID="38699171184dfa46b8af02c0e7a8bf314316f1f3e8f7f4d2c59c764a37fae22a" Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.184536 4910 scope.go:117] "RemoveContainer" containerID="8117e13cdc918455769d99f76275cbebcd1d57825a878291492d6665a99db931" Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.220336 4910 scope.go:117] "RemoveContainer" containerID="6b6e40e5636a9a405ca504456fe3ac469d0bf34f5f35ee789bc2b3c7fd43ed8f" Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.256613 4910 scope.go:117] "RemoveContainer" containerID="f4147284652162acb83f98dd4b38c821d03f77ed60c1b2b0c22836ec39ba3492" Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.280447 4910 scope.go:117] "RemoveContainer" containerID="b20fb279b37814b85e4faff0ba6be368ee421956a3846dd7b5ccb446665cf296" Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.352096 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.370733 4910 scope.go:117] "RemoveContainer" containerID="e2d862c3152a2babe7e6e933e033e365153addf3d9f0e0a5bfdf820d3c653e68"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.479591 4910 generic.go:334] "Generic (PLEG): container finished" podID="2cb18efe-a80d-4657-921d-af4a18ae279d" containerID="631d63a96f64fb0aa20db63e43afb3158c0927307ea2182cd6951a7f9852fdca" exitCode=0
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.479703 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2cb18efe-a80d-4657-921d-af4a18ae279d","Type":"ContainerDied","Data":"631d63a96f64fb0aa20db63e43afb3158c0927307ea2182cd6951a7f9852fdca"}
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.489307 4910 generic.go:334] "Generic (PLEG): container finished" podID="7e2a3efd-2de7-493e-af91-900b224e5313" containerID="642125ac821bb754a0c42680f8f99f5a13b1a90ac3a61d0a934715684f4248eb" exitCode=0
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.489405 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7e2a3efd-2de7-493e-af91-900b224e5313","Type":"ContainerDied","Data":"642125ac821bb754a0c42680f8f99f5a13b1a90ac3a61d0a934715684f4248eb"}
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.489445 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7e2a3efd-2de7-493e-af91-900b224e5313","Type":"ContainerDied","Data":"257a995f3b502011a7d5689ff58613770560f7c76b1e31d51c2505cdaa144b93"}
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.489592 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.496356 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"7e2a3efd-2de7-493e-af91-900b224e5313\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.496635 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-erlang-cookie\") pod \"7e2a3efd-2de7-493e-af91-900b224e5313\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.496693 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-confd\") pod \"7e2a3efd-2de7-493e-af91-900b224e5313\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.496727 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-tls\") pod \"7e2a3efd-2de7-493e-af91-900b224e5313\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.496755 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-plugins\") pod \"7e2a3efd-2de7-493e-af91-900b224e5313\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.496815 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7e2a3efd-2de7-493e-af91-900b224e5313-pod-info\") pod \"7e2a3efd-2de7-493e-af91-900b224e5313\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.496904 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data\") pod \"7e2a3efd-2de7-493e-af91-900b224e5313\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.496940 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-server-conf\") pod \"7e2a3efd-2de7-493e-af91-900b224e5313\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.496975 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-plugins-conf\") pod \"7e2a3efd-2de7-493e-af91-900b224e5313\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.497005 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7e2a3efd-2de7-493e-af91-900b224e5313-erlang-cookie-secret\") pod \"7e2a3efd-2de7-493e-af91-900b224e5313\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.497036 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9p5p9\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-kube-api-access-9p5p9\") pod \"7e2a3efd-2de7-493e-af91-900b224e5313\" (UID: \"7e2a3efd-2de7-493e-af91-900b224e5313\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.503054 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "7e2a3efd-2de7-493e-af91-900b224e5313" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.504211 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "7e2a3efd-2de7-493e-af91-900b224e5313" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.508536 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "7e2a3efd-2de7-493e-af91-900b224e5313" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.509810 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "persistence") pod "7e2a3efd-2de7-493e-af91-900b224e5313" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.510187 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-kube-api-access-9p5p9" (OuterVolumeSpecName: "kube-api-access-9p5p9") pod "7e2a3efd-2de7-493e-af91-900b224e5313" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313"). InnerVolumeSpecName "kube-api-access-9p5p9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.511549 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "7e2a3efd-2de7-493e-af91-900b224e5313" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.518400 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/7e2a3efd-2de7-493e-af91-900b224e5313-pod-info" (OuterVolumeSpecName: "pod-info") pod "7e2a3efd-2de7-493e-af91-900b224e5313" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.532041 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e2a3efd-2de7-493e-af91-900b224e5313-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "7e2a3efd-2de7-493e-af91-900b224e5313" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.536973 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data" (OuterVolumeSpecName: "config-data") pod "7e2a3efd-2de7-493e-af91-900b224e5313" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.537200 4910 generic.go:334] "Generic (PLEG): container finished" podID="97c873ec-c28a-4121-bac2-98b49c6b42a0" containerID="4e8b2fc70196427c5c99643640fbe7135d80de9a670ca3af9c02eb288b8aa7e3" exitCode=0
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.537265 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7bbfdb8fcf-zlpw8" event={"ID":"97c873ec-c28a-4121-bac2-98b49c6b42a0","Type":"ContainerDied","Data":"4e8b2fc70196427c5c99643640fbe7135d80de9a670ca3af9c02eb288b8aa7e3"}
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.546299 4910 generic.go:334] "Generic (PLEG): container finished" podID="b9cedfb5-8c45-434f-b04d-694bf6d600b8" containerID="2c95bc32934ba46ce9701d8eb4e4fdb43de1b82593499f287b4f2c6458380007" exitCode=0
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.546369 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b9cedfb5-8c45-434f-b04d-694bf6d600b8","Type":"ContainerDied","Data":"2c95bc32934ba46ce9701d8eb4e4fdb43de1b82593499f287b4f2c6458380007"}
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.552759 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-server-conf" (OuterVolumeSpecName: "server-conf") pod "7e2a3efd-2de7-493e-af91-900b224e5313" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.596709 4910 scope.go:117] "RemoveContainer" containerID="dd977da3f8e7fc9fff03a9de2e1898d7cae116843deeda14da1e479c7ce300a4"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.601274 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.601288 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.601297 4910 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7e2a3efd-2de7-493e-af91-900b224e5313-pod-info\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.601304 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.601312 4910 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-server-conf\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.601320 4910 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7e2a3efd-2de7-493e-af91-900b224e5313-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.601328 4910 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7e2a3efd-2de7-493e-af91-900b224e5313-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.601338 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9p5p9\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-kube-api-access-9p5p9\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.601360 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.601368 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.602831 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.629998 4910 scope.go:117] "RemoveContainer" containerID="b200e9f40ae5b0a34ae3718175edc6e00f0e7819999c5ddcf7777af1ffb93d24"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.644169 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.659449 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "7e2a3efd-2de7-493e-af91-900b224e5313" (UID: "7e2a3efd-2de7-493e-af91-900b224e5313"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.666612 4910 scope.go:117] "RemoveContainer" containerID="642125ac821bb754a0c42680f8f99f5a13b1a90ac3a61d0a934715684f4248eb"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.686054 4910 scope.go:117] "RemoveContainer" containerID="299a441a9ea2f52977cbffea7f3f23ff9a1fa10c75e20ad3f6d05cf9c52d97b4"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.702109 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-confd\") pod \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.702204 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.702255 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b9cedfb5-8c45-434f-b04d-694bf6d600b8-erlang-cookie-secret\") pod \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.702278 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-tls\") pod \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.702304 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-server-conf\") pod \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.702358 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b9cedfb5-8c45-434f-b04d-694bf6d600b8-pod-info\") pod \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.702384 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkqrn\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-kube-api-access-nkqrn\") pod \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.702449 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data\") pod \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.702473 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-erlang-cookie\") pod \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.702543 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-plugins-conf\") pod \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.702558 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-plugins\") pod \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\" (UID: \"b9cedfb5-8c45-434f-b04d-694bf6d600b8\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.702830 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7e2a3efd-2de7-493e-af91-900b224e5313-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.702845 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.703220 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "b9cedfb5-8c45-434f-b04d-694bf6d600b8" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.706485 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "b9cedfb5-8c45-434f-b04d-694bf6d600b8" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.706573 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "b9cedfb5-8c45-434f-b04d-694bf6d600b8" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.712312 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-kube-api-access-nkqrn" (OuterVolumeSpecName: "kube-api-access-nkqrn") pod "b9cedfb5-8c45-434f-b04d-694bf6d600b8" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8"). InnerVolumeSpecName "kube-api-access-nkqrn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.712377 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "b9cedfb5-8c45-434f-b04d-694bf6d600b8" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.712412 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "b9cedfb5-8c45-434f-b04d-694bf6d600b8" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.712534 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/b9cedfb5-8c45-434f-b04d-694bf6d600b8-pod-info" (OuterVolumeSpecName: "pod-info") pod "b9cedfb5-8c45-434f-b04d-694bf6d600b8" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.713931 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9cedfb5-8c45-434f-b04d-694bf6d600b8-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "b9cedfb5-8c45-434f-b04d-694bf6d600b8" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.732629 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data" (OuterVolumeSpecName: "config-data") pod "b9cedfb5-8c45-434f-b04d-694bf6d600b8" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.739436 4910 scope.go:117] "RemoveContainer" containerID="642125ac821bb754a0c42680f8f99f5a13b1a90ac3a61d0a934715684f4248eb"
Jan 05 22:15:29 crc kubenswrapper[4910]: E0105 22:15:29.740803 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"642125ac821bb754a0c42680f8f99f5a13b1a90ac3a61d0a934715684f4248eb\": container with ID starting with 642125ac821bb754a0c42680f8f99f5a13b1a90ac3a61d0a934715684f4248eb not found: ID does not exist" containerID="642125ac821bb754a0c42680f8f99f5a13b1a90ac3a61d0a934715684f4248eb"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.740843 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"642125ac821bb754a0c42680f8f99f5a13b1a90ac3a61d0a934715684f4248eb"} err="failed to get container status \"642125ac821bb754a0c42680f8f99f5a13b1a90ac3a61d0a934715684f4248eb\": rpc error: code = NotFound desc = could not find container \"642125ac821bb754a0c42680f8f99f5a13b1a90ac3a61d0a934715684f4248eb\": container with ID starting with 642125ac821bb754a0c42680f8f99f5a13b1a90ac3a61d0a934715684f4248eb not found: ID does not exist"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.740871 4910 scope.go:117] "RemoveContainer" containerID="299a441a9ea2f52977cbffea7f3f23ff9a1fa10c75e20ad3f6d05cf9c52d97b4"
Jan 05 22:15:29 crc kubenswrapper[4910]: E0105 22:15:29.741815 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"299a441a9ea2f52977cbffea7f3f23ff9a1fa10c75e20ad3f6d05cf9c52d97b4\": container with ID starting with 299a441a9ea2f52977cbffea7f3f23ff9a1fa10c75e20ad3f6d05cf9c52d97b4 not found: ID does not exist" containerID="299a441a9ea2f52977cbffea7f3f23ff9a1fa10c75e20ad3f6d05cf9c52d97b4"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.741838 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"299a441a9ea2f52977cbffea7f3f23ff9a1fa10c75e20ad3f6d05cf9c52d97b4"} err="failed to get container status \"299a441a9ea2f52977cbffea7f3f23ff9a1fa10c75e20ad3f6d05cf9c52d97b4\": rpc error: code = NotFound desc = could not find container \"299a441a9ea2f52977cbffea7f3f23ff9a1fa10c75e20ad3f6d05cf9c52d97b4\": container with ID starting with 299a441a9ea2f52977cbffea7f3f23ff9a1fa10c75e20ad3f6d05cf9c52d97b4 not found: ID does not exist"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.742542 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.798527 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-server-conf" (OuterVolumeSpecName: "server-conf") pod "b9cedfb5-8c45-434f-b04d-694bf6d600b8" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.804050 4910 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b9cedfb5-8c45-434f-b04d-694bf6d600b8-pod-info\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.804088 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkqrn\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-kube-api-access-nkqrn\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.804101 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.804113 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.805523 4910 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.805538 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.805563 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.805575 4910 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b9cedfb5-8c45-434f-b04d-694bf6d600b8-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.805605 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.805614 4910 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b9cedfb5-8c45-434f-b04d-694bf6d600b8-server-conf\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.831891 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.840850 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "b9cedfb5-8c45-434f-b04d-694bf6d600b8" (UID: "b9cedfb5-8c45-434f-b04d-694bf6d600b8"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.844992 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7bbfdb8fcf-zlpw8"
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.855546 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.860457 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.907145 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2cb18efe-a80d-4657-921d-af4a18ae279d-galera-tls-certs\") pod \"2cb18efe-a80d-4657-921d-af4a18ae279d\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.907218 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"2cb18efe-a80d-4657-921d-af4a18ae279d\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.907250 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-config-data-default\") pod \"2cb18efe-a80d-4657-921d-af4a18ae279d\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.907417 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-57vm5\" (UniqueName: \"kubernetes.io/projected/2cb18efe-a80d-4657-921d-af4a18ae279d-kube-api-access-57vm5\") pod \"2cb18efe-a80d-4657-921d-af4a18ae279d\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.907498 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-operator-scripts\") pod \"2cb18efe-a80d-4657-921d-af4a18ae279d\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.907583 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-kolla-config\") pod \"2cb18efe-a80d-4657-921d-af4a18ae279d\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.907621 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cb18efe-a80d-4657-921d-af4a18ae279d-combined-ca-bundle\") pod \"2cb18efe-a80d-4657-921d-af4a18ae279d\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.907697 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2cb18efe-a80d-4657-921d-af4a18ae279d-config-data-generated\") pod \"2cb18efe-a80d-4657-921d-af4a18ae279d\" (UID: \"2cb18efe-a80d-4657-921d-af4a18ae279d\") "
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.908376 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "2cb18efe-a80d-4657-921d-af4a18ae279d" (UID: "2cb18efe-a80d-4657-921d-af4a18ae279d"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.908492 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "2cb18efe-a80d-4657-921d-af4a18ae279d" (UID: "2cb18efe-a80d-4657-921d-af4a18ae279d"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.908617 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cb18efe-a80d-4657-921d-af4a18ae279d-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "2cb18efe-a80d-4657-921d-af4a18ae279d" (UID: "2cb18efe-a80d-4657-921d-af4a18ae279d"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.909029 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2cb18efe-a80d-4657-921d-af4a18ae279d" (UID: "2cb18efe-a80d-4657-921d-af4a18ae279d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.909969 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b9cedfb5-8c45-434f-b04d-694bf6d600b8-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.909996 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.910006 4910 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-kolla-config\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.911889 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cb18efe-a80d-4657-921d-af4a18ae279d-kube-api-access-57vm5" (OuterVolumeSpecName: "kube-api-access-57vm5") pod "2cb18efe-a80d-4657-921d-af4a18ae279d" (UID: "2cb18efe-a80d-4657-921d-af4a18ae279d"). InnerVolumeSpecName "kube-api-access-57vm5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.917230 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "mysql-db") pod "2cb18efe-a80d-4657-921d-af4a18ae279d" (UID: "2cb18efe-a80d-4657-921d-af4a18ae279d"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.937373 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cb18efe-a80d-4657-921d-af4a18ae279d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2cb18efe-a80d-4657-921d-af4a18ae279d" (UID: "2cb18efe-a80d-4657-921d-af4a18ae279d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:29 crc kubenswrapper[4910]: I0105 22:15:29.945468 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cb18efe-a80d-4657-921d-af4a18ae279d-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "2cb18efe-a80d-4657-921d-af4a18ae279d" (UID: "2cb18efe-a80d-4657-921d-af4a18ae279d"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.011914 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-config-data\") pod \"97c873ec-c28a-4121-bac2-98b49c6b42a0\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") "
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.011971 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbxgh\" (UniqueName: \"kubernetes.io/projected/97c873ec-c28a-4121-bac2-98b49c6b42a0-kube-api-access-vbxgh\") pod \"97c873ec-c28a-4121-bac2-98b49c6b42a0\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") "
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.012408 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-combined-ca-bundle\") pod \"97c873ec-c28a-4121-bac2-98b49c6b42a0\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") "
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.012510 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-credential-keys\") pod \"97c873ec-c28a-4121-bac2-98b49c6b42a0\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") "
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.012571 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-scripts\") pod \"97c873ec-c28a-4121-bac2-98b49c6b42a0\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") "
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.012658 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-internal-tls-certs\") pod \"97c873ec-c28a-4121-bac2-98b49c6b42a0\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") "
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.013899 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-public-tls-certs\") pod \"97c873ec-c28a-4121-bac2-98b49c6b42a0\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") "
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.014279 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-fernet-keys\") pod \"97c873ec-c28a-4121-bac2-98b49c6b42a0\" (UID: \"97c873ec-c28a-4121-bac2-98b49c6b42a0\") "
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.014960 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97c873ec-c28a-4121-bac2-98b49c6b42a0-kube-api-access-vbxgh" (OuterVolumeSpecName: "kube-api-access-vbxgh") pod "97c873ec-c28a-4121-bac2-98b49c6b42a0" (UID: "97c873ec-c28a-4121-bac2-98b49c6b42a0"). InnerVolumeSpecName "kube-api-access-vbxgh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.015686 4910 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2cb18efe-a80d-4657-921d-af4a18ae279d-galera-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.015730 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" "
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.015744 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-config-data-default\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.015756 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-57vm5\" (UniqueName: \"kubernetes.io/projected/2cb18efe-a80d-4657-921d-af4a18ae279d-kube-api-access-57vm5\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.015771 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cb18efe-a80d-4657-921d-af4a18ae279d-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.015781 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbxgh\" (UniqueName: \"kubernetes.io/projected/97c873ec-c28a-4121-bac2-98b49c6b42a0-kube-api-access-vbxgh\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.015792 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cb18efe-a80d-4657-921d-af4a18ae279d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.015803 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2cb18efe-a80d-4657-921d-af4a18ae279d-config-data-generated\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.016457 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-scripts" (OuterVolumeSpecName: "scripts") pod "97c873ec-c28a-4121-bac2-98b49c6b42a0" (UID: "97c873ec-c28a-4121-bac2-98b49c6b42a0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.016491 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "97c873ec-c28a-4121-bac2-98b49c6b42a0" (UID: "97c873ec-c28a-4121-bac2-98b49c6b42a0"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.037640 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "97c873ec-c28a-4121-bac2-98b49c6b42a0" (UID: "97c873ec-c28a-4121-bac2-98b49c6b42a0"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.040547 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-config-data" (OuterVolumeSpecName: "config-data") pod "97c873ec-c28a-4121-bac2-98b49c6b42a0" (UID: "97c873ec-c28a-4121-bac2-98b49c6b42a0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.043621 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97c873ec-c28a-4121-bac2-98b49c6b42a0" (UID: "97c873ec-c28a-4121-bac2-98b49c6b42a0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.051329 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.071336 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "97c873ec-c28a-4121-bac2-98b49c6b42a0" (UID: "97c873ec-c28a-4121-bac2-98b49c6b42a0"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.072097 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "97c873ec-c28a-4121-bac2-98b49c6b42a0" (UID: "97c873ec-c28a-4121-bac2-98b49c6b42a0"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.117167 4910 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-credential-keys\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.117200 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-scripts\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.117213 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.117225 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.117235 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.117245 4910 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.117256 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.117269 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c873ec-c28a-4121-bac2-98b49c6b42a0-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.563136 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.563151 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2cb18efe-a80d-4657-921d-af4a18ae279d","Type":"ContainerDied","Data":"e30bba8cb56bd6363291eb1e68f0992c5dacc1341f078f128e879d084067cc3b"}
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.563204 4910 scope.go:117] "RemoveContainer" containerID="631d63a96f64fb0aa20db63e43afb3158c0927307ea2182cd6951a7f9852fdca"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.570342 4910 generic.go:334] "Generic (PLEG): container finished" podID="83319bb4-7278-49b3-8ef2-beb8baa0a1a6" containerID="03e2a0482d96bb74144b1ebf3502bf0c9e701db7ab42a851ca5abd53fadbfdf7" exitCode=0
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.570404 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83319bb4-7278-49b3-8ef2-beb8baa0a1a6","Type":"ContainerDied","Data":"03e2a0482d96bb74144b1ebf3502bf0c9e701db7ab42a851ca5abd53fadbfdf7"}
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.572334 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7bbfdb8fcf-zlpw8" event={"ID":"97c873ec-c28a-4121-bac2-98b49c6b42a0","Type":"ContainerDied","Data":"c6b1e392bb89b0aa402e5d93ba7298e8ab8df4208ab11d5d3207690e4b81280a"}
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.572356 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7bbfdb8fcf-zlpw8"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.575947 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.575943 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b9cedfb5-8c45-434f-b04d-694bf6d600b8","Type":"ContainerDied","Data":"c17220f847316999c4506c2acff54beeb887dc60e3db70fa4fdc73a07da0cd76"}
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.636284 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.653927 4910 scope.go:117] "RemoveContainer" containerID="44a69179172486de1cfaee52c5b45f28ff6e2522c6ba8a153a50795a10335125"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.665717 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.674364 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.684343 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"]
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.687716 4910 scope.go:117] "RemoveContainer" containerID="4e8b2fc70196427c5c99643640fbe7135d80de9a670ca3af9c02eb288b8aa7e3"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.698036 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"]
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.705559 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-7bbfdb8fcf-zlpw8"]
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.712546 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-7bbfdb8fcf-zlpw8"]
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.726728 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-combined-ca-bundle\") pod \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\" (UID: \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\") "
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.726839 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qn2l9\" (UniqueName: \"kubernetes.io/projected/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-kube-api-access-qn2l9\") pod \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\" (UID: \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\") "
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.726902 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-config-data\") pod \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\" (UID: \"83319bb4-7278-49b3-8ef2-beb8baa0a1a6\") "
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.731582 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2210f0ce-43b3-4560-84f8-b56a65414758" path="/var/lib/kubelet/pods/2210f0ce-43b3-4560-84f8-b56a65414758/volumes"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.732033 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cb18efe-a80d-4657-921d-af4a18ae279d" path="/var/lib/kubelet/pods/2cb18efe-a80d-4657-921d-af4a18ae279d/volumes"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.732616 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39608078-4c49-4ca6-b9d4-6cdd37d89f91" path="/var/lib/kubelet/pods/39608078-4c49-4ca6-b9d4-6cdd37d89f91/volumes"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.733591 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" path="/var/lib/kubelet/pods/3d881977-4280-42f6-8ec5-65be97c8dc28/volumes"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.734382 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e2a3efd-2de7-493e-af91-900b224e5313" path="/var/lib/kubelet/pods/7e2a3efd-2de7-493e-af91-900b224e5313/volumes"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.735722 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97c873ec-c28a-4121-bac2-98b49c6b42a0" path="/var/lib/kubelet/pods/97c873ec-c28a-4121-bac2-98b49c6b42a0/volumes"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.736657 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9cedfb5-8c45-434f-b04d-694bf6d600b8" path="/var/lib/kubelet/pods/b9cedfb5-8c45-434f-b04d-694bf6d600b8/volumes"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.737201 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce8ea9ec-e799-457a-aaca-e16b591bdf0c" path="/var/lib/kubelet/pods/ce8ea9ec-e799-457a-aaca-e16b591bdf0c/volumes"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.738023 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da0001c8-7a9c-47a8-b901-03066bf6a7ff" path="/var/lib/kubelet/pods/da0001c8-7a9c-47a8-b901-03066bf6a7ff/volumes"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.744455 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-kube-api-access-qn2l9" (OuterVolumeSpecName: "kube-api-access-qn2l9") pod "83319bb4-7278-49b3-8ef2-beb8baa0a1a6" (UID: "83319bb4-7278-49b3-8ef2-beb8baa0a1a6"). InnerVolumeSpecName "kube-api-access-qn2l9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.750186 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-config-data" (OuterVolumeSpecName: "config-data") pod "83319bb4-7278-49b3-8ef2-beb8baa0a1a6" (UID: "83319bb4-7278-49b3-8ef2-beb8baa0a1a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.769478 4910 scope.go:117] "RemoveContainer" containerID="2c95bc32934ba46ce9701d8eb4e4fdb43de1b82593499f287b4f2c6458380007"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.774129 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83319bb4-7278-49b3-8ef2-beb8baa0a1a6" (UID: "83319bb4-7278-49b3-8ef2-beb8baa0a1a6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.792754 4910 scope.go:117] "RemoveContainer" containerID="2dd0985809b50b7237e41b4d234a09fc5fdb093346ee879d550b4f63215e2788"
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.829425 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qn2l9\" (UniqueName: \"kubernetes.io/projected/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-kube-api-access-qn2l9\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.829452 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:30 crc kubenswrapper[4910]: I0105 22:15:30.829464 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83319bb4-7278-49b3-8ef2-beb8baa0a1a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 22:15:31 crc kubenswrapper[4910]: I0105 22:15:31.594524 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83319bb4-7278-49b3-8ef2-beb8baa0a1a6","Type":"ContainerDied","Data":"1560d48dd447293696b517aa3d065e515c4b5269315738b62065aa6566e62faa"}
Jan 05 22:15:31 crc kubenswrapper[4910]: I0105 22:15:31.594574 4910 scope.go:117] "RemoveContainer" containerID="03e2a0482d96bb74144b1ebf3502bf0c9e701db7ab42a851ca5abd53fadbfdf7"
Jan 05 22:15:31 crc kubenswrapper[4910]: I0105 22:15:31.594663 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 05 22:15:31 crc kubenswrapper[4910]: I0105 22:15:31.639504 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 05 22:15:31 crc kubenswrapper[4910]: I0105 22:15:31.657389 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 05 22:15:31 crc kubenswrapper[4910]: I0105 22:15:31.836948 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" podUID="45acd92f-2e5d-4fc1-8b91-c91f165e786a" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.159:9311/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 05 22:15:31 crc kubenswrapper[4910]: I0105 22:15:31.837536 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6bbbdf8dc6-s6tmf" podUID="45acd92f-2e5d-4fc1-8b91-c91f165e786a" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.159:9311/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 05 22:15:32 crc kubenswrapper[4910]: I0105 22:15:32.382239 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/memcached-0" podUID="39608078-4c49-4ca6-b9d4-6cdd37d89f91" containerName="memcached" probeResult="failure" output="dial tcp 10.217.0.104:11211: i/o timeout"
Jan 05 22:15:32 crc kubenswrapper[4910]: I0105 22:15:32.734217 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83319bb4-7278-49b3-8ef2-beb8baa0a1a6" path="/var/lib/kubelet/pods/83319bb4-7278-49b3-8ef2-beb8baa0a1a6/volumes"
Jan 05 22:15:33 crc kubenswrapper[4910]: E0105 22:15:33.447173 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 05 22:15:33 crc kubenswrapper[4910]: E0105 22:15:33.447758 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 05 22:15:33 crc kubenswrapper[4910]: E0105 22:15:33.448278 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 05 22:15:33 crc kubenswrapper[4910]: E0105 22:15:33.448309 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovsdb-server"
Jan 05 22:15:33 crc kubenswrapper[4910]: E0105 22:15:33.453693 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 05 22:15:33 crc kubenswrapper[4910]: E0105 22:15:33.457307 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 05 22:15:33 crc kubenswrapper[4910]: E0105 22:15:33.462220 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 05 22:15:33 crc kubenswrapper[4910]: E0105 22:15:33.462317 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovs-vswitchd"
Jan 05 22:15:38 crc kubenswrapper[4910]: E0105 22:15:38.446707 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 05 22:15:38 crc kubenswrapper[4910]: E0105 22:15:38.447826 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 05 22:15:38 crc kubenswrapper[4910]: E0105 22:15:38.447870 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 05 22:15:38 crc kubenswrapper[4910]: E0105 22:15:38.448253 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 05 22:15:38 crc kubenswrapper[4910]: E0105 22:15:38.448285 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovsdb-server"
Jan 05 22:15:38 crc kubenswrapper[4910]: E0105 22:15:38.449346 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 05 22:15:38 crc kubenswrapper[4910]: E0105 22:15:38.452276 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 05 22:15:38 crc kubenswrapper[4910]: E0105 22:15:38.452390 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovs-vswitchd"
Jan 05 22:15:40 crc kubenswrapper[4910]: I0105 22:15:40.952101 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 05 22:15:40 crc kubenswrapper[4910]: I0105 22:15:40.952552 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 05 22:15:40 crc kubenswrapper[4910]: I0105 22:15:40.952596 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85"
Jan 05 22:15:40 crc kubenswrapper[4910]: I0105 22:15:40.952992 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3c994ce088089ca2a9dc19bf92bc43649f3bc30178471fa64d55a2db65d9d2ab"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 05 22:15:40 crc kubenswrapper[4910]: I0105 22:15:40.953036 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://3c994ce088089ca2a9dc19bf92bc43649f3bc30178471fa64d55a2db65d9d2ab" gracePeriod=600
Jan 05 22:15:41 crc kubenswrapper[4910]: I0105 22:15:41.717687 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="3c994ce088089ca2a9dc19bf92bc43649f3bc30178471fa64d55a2db65d9d2ab" exitCode=0
Jan 05 22:15:41 crc kubenswrapper[4910]: I0105 22:15:41.717738 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"3c994ce088089ca2a9dc19bf92bc43649f3bc30178471fa64d55a2db65d9d2ab"}
Jan 05 22:15:41 crc kubenswrapper[4910]: I0105 22:15:41.718090 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"}
Jan 05 22:15:41 crc kubenswrapper[4910]: I0105 22:15:41.718147 4910 scope.go:117] "RemoveContainer" containerID="657357707be4d8c777ee71d089740dbf0952f7ef5dc120116497297a0abbc7b5"
Jan 05 22:15:43 crc kubenswrapper[4910]: E0105 22:15:43.447640 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 05 22:15:43 crc kubenswrapper[4910]: E0105 22:15:43.450505 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 05 22:15:43 crc
kubenswrapper[4910]: E0105 22:15:43.450749 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 05 22:15:43 crc kubenswrapper[4910]: E0105 22:15:43.450914 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 05 22:15:43 crc kubenswrapper[4910]: E0105 22:15:43.450969 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovsdb-server" Jan 05 22:15:43 crc kubenswrapper[4910]: E0105 22:15:43.452304 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 05 22:15:43 crc kubenswrapper[4910]: E0105 22:15:43.453829 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 05 22:15:43 crc kubenswrapper[4910]: E0105 22:15:43.453874 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovs-vswitchd" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.449522 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.549884 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-combined-ca-bundle\") pod \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.549936 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-config\") pod \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.550063 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-internal-tls-certs\") pod \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.550192 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftrb5\" (UniqueName: \"kubernetes.io/projected/227b48c0-2e23-4048-8fb5-21628bd9e5e0-kube-api-access-ftrb5\") pod \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.550225 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-public-tls-certs\") pod \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.550247 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-ovndb-tls-certs\") pod \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.550308 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-httpd-config\") pod \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\" (UID: \"227b48c0-2e23-4048-8fb5-21628bd9e5e0\") " Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.556876 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/227b48c0-2e23-4048-8fb5-21628bd9e5e0-kube-api-access-ftrb5" (OuterVolumeSpecName: "kube-api-access-ftrb5") pod "227b48c0-2e23-4048-8fb5-21628bd9e5e0" (UID: "227b48c0-2e23-4048-8fb5-21628bd9e5e0"). InnerVolumeSpecName "kube-api-access-ftrb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.566353 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "227b48c0-2e23-4048-8fb5-21628bd9e5e0" (UID: "227b48c0-2e23-4048-8fb5-21628bd9e5e0"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.590523 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-config" (OuterVolumeSpecName: "config") pod "227b48c0-2e23-4048-8fb5-21628bd9e5e0" (UID: "227b48c0-2e23-4048-8fb5-21628bd9e5e0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.595022 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "227b48c0-2e23-4048-8fb5-21628bd9e5e0" (UID: "227b48c0-2e23-4048-8fb5-21628bd9e5e0"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.595769 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "227b48c0-2e23-4048-8fb5-21628bd9e5e0" (UID: "227b48c0-2e23-4048-8fb5-21628bd9e5e0"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.600806 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "227b48c0-2e23-4048-8fb5-21628bd9e5e0" (UID: "227b48c0-2e23-4048-8fb5-21628bd9e5e0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.614391 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "227b48c0-2e23-4048-8fb5-21628bd9e5e0" (UID: "227b48c0-2e23-4048-8fb5-21628bd9e5e0"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.653106 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftrb5\" (UniqueName: \"kubernetes.io/projected/227b48c0-2e23-4048-8fb5-21628bd9e5e0-kube-api-access-ftrb5\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.653195 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.653209 4910 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.653247 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.653265 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.653279 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-config\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.653292 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/227b48c0-2e23-4048-8fb5-21628bd9e5e0-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.769444 4910 generic.go:334] "Generic (PLEG): container finished" podID="227b48c0-2e23-4048-8fb5-21628bd9e5e0" containerID="5e9cd39ea8845a5fd2c6e7c0fe1c864ac551845861a02b8b20ce5e8da8cd01fb" exitCode=0 Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.769494 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c69d8c8f7-7w2gb" event={"ID":"227b48c0-2e23-4048-8fb5-21628bd9e5e0","Type":"ContainerDied","Data":"5e9cd39ea8845a5fd2c6e7c0fe1c864ac551845861a02b8b20ce5e8da8cd01fb"} Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.769536 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c69d8c8f7-7w2gb" event={"ID":"227b48c0-2e23-4048-8fb5-21628bd9e5e0","Type":"ContainerDied","Data":"d99c8f190fc33bc92e2cfd7b4ba8802c8d2aee488a17c92d2a53677ceb742870"} Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.769557 4910 scope.go:117] "RemoveContainer" containerID="0ce63635905b4359223cc707716af9867aeeb87e2e260750761f5c1bca381777" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.769688 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6c69d8c8f7-7w2gb" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.799353 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6c69d8c8f7-7w2gb"] Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.800431 4910 scope.go:117] "RemoveContainer" containerID="5e9cd39ea8845a5fd2c6e7c0fe1c864ac551845861a02b8b20ce5e8da8cd01fb" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.804522 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-6c69d8c8f7-7w2gb"] Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.824406 4910 scope.go:117] "RemoveContainer" containerID="0ce63635905b4359223cc707716af9867aeeb87e2e260750761f5c1bca381777" Jan 05 22:15:44 crc kubenswrapper[4910]: E0105 22:15:44.824920 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ce63635905b4359223cc707716af9867aeeb87e2e260750761f5c1bca381777\": container with ID starting with 0ce63635905b4359223cc707716af9867aeeb87e2e260750761f5c1bca381777 not found: ID does not exist" containerID="0ce63635905b4359223cc707716af9867aeeb87e2e260750761f5c1bca381777" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.824950 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ce63635905b4359223cc707716af9867aeeb87e2e260750761f5c1bca381777"} err="failed to get container status \"0ce63635905b4359223cc707716af9867aeeb87e2e260750761f5c1bca381777\": rpc error: code = NotFound desc = could not find container \"0ce63635905b4359223cc707716af9867aeeb87e2e260750761f5c1bca381777\": container with ID starting with 0ce63635905b4359223cc707716af9867aeeb87e2e260750761f5c1bca381777 not found: ID does not exist" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.824981 4910 scope.go:117] "RemoveContainer" containerID="5e9cd39ea8845a5fd2c6e7c0fe1c864ac551845861a02b8b20ce5e8da8cd01fb" Jan 05 22:15:44 crc kubenswrapper[4910]: E0105 22:15:44.825372 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e9cd39ea8845a5fd2c6e7c0fe1c864ac551845861a02b8b20ce5e8da8cd01fb\": container with ID starting with 5e9cd39ea8845a5fd2c6e7c0fe1c864ac551845861a02b8b20ce5e8da8cd01fb not found: ID does not exist" containerID="5e9cd39ea8845a5fd2c6e7c0fe1c864ac551845861a02b8b20ce5e8da8cd01fb" Jan 05 22:15:44 crc kubenswrapper[4910]: I0105 22:15:44.825404 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e9cd39ea8845a5fd2c6e7c0fe1c864ac551845861a02b8b20ce5e8da8cd01fb"} err="failed to get container status \"5e9cd39ea8845a5fd2c6e7c0fe1c864ac551845861a02b8b20ce5e8da8cd01fb\": rpc error: code = NotFound desc = could not find container \"5e9cd39ea8845a5fd2c6e7c0fe1c864ac551845861a02b8b20ce5e8da8cd01fb\": container with ID starting with 5e9cd39ea8845a5fd2c6e7c0fe1c864ac551845861a02b8b20ce5e8da8cd01fb not found: ID does not exist" Jan 05 22:15:46 crc kubenswrapper[4910]: I0105 22:15:46.739573 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="227b48c0-2e23-4048-8fb5-21628bd9e5e0" path="/var/lib/kubelet/pods/227b48c0-2e23-4048-8fb5-21628bd9e5e0/volumes" Jan 05 22:15:48 crc kubenswrapper[4910]: E0105 22:15:48.447535 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 05 22:15:48 crc kubenswrapper[4910]: E0105 22:15:48.448637 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 05 22:15:48 crc kubenswrapper[4910]: E0105 22:15:48.448947 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 05 22:15:48 crc kubenswrapper[4910]: E0105 22:15:48.448984 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovsdb-server" Jan 05 22:15:48 crc kubenswrapper[4910]: E0105 22:15:48.451097 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 05 22:15:48 crc kubenswrapper[4910]: E0105 22:15:48.452678 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 05 22:15:48 crc kubenswrapper[4910]: E0105 22:15:48.454997 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 05 22:15:48 crc kubenswrapper[4910]: E0105 22:15:48.455036 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-9g2kt" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovs-vswitchd" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.839971 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-9g2kt_780aad6a-41ff-410c-a6fc-6be2faf38b6f/ovs-vswitchd/0.log" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.841973 4910 generic.go:334] "Generic 
(PLEG): container finished" podID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" exitCode=137 Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.842061 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-9g2kt" event={"ID":"780aad6a-41ff-410c-a6fc-6be2faf38b6f","Type":"ContainerDied","Data":"9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5"} Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.848356 4910 generic.go:334] "Generic (PLEG): container finished" podID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerID="9b00f43f9dea3110ed5f648eaaad264722a104ce4177678f5dfd3b49816ef94f" exitCode=137 Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.848448 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"9b00f43f9dea3110ed5f648eaaad264722a104ce4177678f5dfd3b49816ef94f"} Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.892661 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-9g2kt_780aad6a-41ff-410c-a6fc-6be2faf38b6f/ovs-vswitchd/0.log" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.893700 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.971070 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-etc-ovs\") pod \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.971221 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "780aad6a-41ff-410c-a6fc-6be2faf38b6f" (UID: "780aad6a-41ff-410c-a6fc-6be2faf38b6f"). InnerVolumeSpecName "etc-ovs". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.971350 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hclzn\" (UniqueName: \"kubernetes.io/projected/780aad6a-41ff-410c-a6fc-6be2faf38b6f-kube-api-access-hclzn\") pod \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.971404 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-log\") pod \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.971472 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-lib\") pod \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.971473 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-log" (OuterVolumeSpecName: "var-log") pod "780aad6a-41ff-410c-a6fc-6be2faf38b6f" (UID: "780aad6a-41ff-410c-a6fc-6be2faf38b6f"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.971511 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/780aad6a-41ff-410c-a6fc-6be2faf38b6f-scripts\") pod \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.971533 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-lib" (OuterVolumeSpecName: "var-lib") pod "780aad6a-41ff-410c-a6fc-6be2faf38b6f" (UID: "780aad6a-41ff-410c-a6fc-6be2faf38b6f"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.971545 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-run\") pod \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\" (UID: \"780aad6a-41ff-410c-a6fc-6be2faf38b6f\") " Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.971733 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-run" (OuterVolumeSpecName: "var-run") pod "780aad6a-41ff-410c-a6fc-6be2faf38b6f" (UID: "780aad6a-41ff-410c-a6fc-6be2faf38b6f"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.971980 4910 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-lib\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.972004 4910 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-run\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.972015 4910 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-etc-ovs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.972023 4910 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/780aad6a-41ff-410c-a6fc-6be2faf38b6f-var-log\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.972827 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/780aad6a-41ff-410c-a6fc-6be2faf38b6f-scripts" (OuterVolumeSpecName: "scripts") pod "780aad6a-41ff-410c-a6fc-6be2faf38b6f" (UID: "780aad6a-41ff-410c-a6fc-6be2faf38b6f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.978689 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/780aad6a-41ff-410c-a6fc-6be2faf38b6f-kube-api-access-hclzn" (OuterVolumeSpecName: "kube-api-access-hclzn") pod "780aad6a-41ff-410c-a6fc-6be2faf38b6f" (UID: "780aad6a-41ff-410c-a6fc-6be2faf38b6f"). InnerVolumeSpecName "kube-api-access-hclzn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:51 crc kubenswrapper[4910]: I0105 22:15:51.981947 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.073387 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-cache\") pod \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.073541 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.073565 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift\") pod \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.073600 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-lock\") pod \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.074105 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-cache" (OuterVolumeSpecName: "cache") pod "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" (UID: "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.074399 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7m87\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-kube-api-access-b7m87\") pod \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\" (UID: \"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c\") " Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.074477 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-lock" (OuterVolumeSpecName: "lock") pod "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" (UID: "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c"). InnerVolumeSpecName "lock". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.074788 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hclzn\" (UniqueName: \"kubernetes.io/projected/780aad6a-41ff-410c-a6fc-6be2faf38b6f-kube-api-access-hclzn\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.074804 4910 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-cache\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.074814 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/780aad6a-41ff-410c-a6fc-6be2faf38b6f-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.074822 4910 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-lock\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.077219 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-kube-api-access-b7m87" (OuterVolumeSpecName: "kube-api-access-b7m87") pod "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" (UID: "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c"). InnerVolumeSpecName "kube-api-access-b7m87". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.077256 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" (UID: "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.078441 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "swift") pod "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" (UID: "4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c"). InnerVolumeSpecName "local-storage05-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.175919 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.175969 4910 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.175984 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7m87\" (UniqueName: \"kubernetes.io/projected/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c-kube-api-access-b7m87\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.189930 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.277313 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.860918 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-9g2kt_780aad6a-41ff-410c-a6fc-6be2faf38b6f/ovs-vswitchd/0.log" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.862922 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-9g2kt" event={"ID":"780aad6a-41ff-410c-a6fc-6be2faf38b6f","Type":"ContainerDied","Data":"0aa430ff91116b57d25f59680c6d3eabee0fd263542fdeaf8307fb0a292bc334"} Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.862950 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-9g2kt" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.863274 4910 scope.go:117] "RemoveContainer" containerID="9b905fc177f4972520c21e7cfca7402f4ec2958051e09f353da013b9785c1fd5" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.876399 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c","Type":"ContainerDied","Data":"65e5934656c083e3476166b6be1811539b0e5d926d9b6397c4b98cd4f297f842"} Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.876611 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.892028 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-9g2kt"] Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.909031 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-9g2kt"] Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.913280 4910 scope.go:117] "RemoveContainer" containerID="1156aec47bb3d84c95e2006625aff94e5b69e53e55d1398e4a2433f85e691338" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.914393 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.920290 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.939141 4910 scope.go:117] "RemoveContainer" containerID="05d94ba54d454e230e030748a2bf07bce00a32127cf9fc1e78a93b76e657c064" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.967496 4910 scope.go:117] "RemoveContainer" containerID="9b00f43f9dea3110ed5f648eaaad264722a104ce4177678f5dfd3b49816ef94f" Jan 05 22:15:52 crc kubenswrapper[4910]: I0105 22:15:52.996934 4910 scope.go:117] "RemoveContainer" containerID="16a4b970b359fbc6fc563656363aa95e36f86df606367a79bbe2212753463870" Jan 05 22:15:53 crc kubenswrapper[4910]: I0105 22:15:53.016403 4910 scope.go:117] "RemoveContainer" containerID="831fd24da04e59f5338c337a32590b1382ff92df1e292249c377b504749c88e0" Jan 05 22:15:53 crc kubenswrapper[4910]: I0105 22:15:53.036941 4910 scope.go:117] "RemoveContainer" containerID="003b527dd2c8b643268b3cff916e9f4b6fbe1f8126957b42aa16a3434a320025" Jan 05 22:15:53 crc kubenswrapper[4910]: I0105 22:15:53.058608 4910 scope.go:117] "RemoveContainer" containerID="5739ed0c5ca6cc3d18fd000f35d28e3755ecb57c1feed925bbcdda9a4d46f763" Jan 05 22:15:53 crc kubenswrapper[4910]: I0105 22:15:53.079764 4910 scope.go:117] "RemoveContainer" containerID="5fa5b197746f4fc6c232971216c9a644e9ab975e961e0c935229cf38a4e633b6" Jan 05 22:15:53 crc kubenswrapper[4910]: I0105 22:15:53.101746 4910 scope.go:117] "RemoveContainer" containerID="1614de421c052452069aee80467540af7a4813e1f57aea4bdd99541595f16624" Jan 05 22:15:53 crc kubenswrapper[4910]: I0105 22:15:53.126298 4910 scope.go:117] "RemoveContainer" containerID="187beb52e9b46c05114bf6a7d8a6f124abb0c3eca374625c1e25c808968452b8" Jan 05 22:15:53 crc kubenswrapper[4910]: I0105 22:15:53.152979 4910 scope.go:117] "RemoveContainer" containerID="030ff0b9ff130c75ae6701e006d9210557f59ef3c03a7bb98a7ca430e97109c9" Jan 05 22:15:53 crc kubenswrapper[4910]: I0105 22:15:53.173550 4910 scope.go:117] "RemoveContainer" containerID="68e3c9997f2af7b46038842848451d18323b773c525c082d57ad2f0fb30df5ed" Jan 05 22:15:53 crc kubenswrapper[4910]: I0105 22:15:53.198708 4910 scope.go:117] "RemoveContainer" containerID="ab943cdf655ff0be681e72dca8f34c8ac3fb8d5e0e2a1b8ed872d453cb2ea0d6" Jan 05 22:15:53 crc kubenswrapper[4910]: I0105 22:15:53.222273 4910 scope.go:117] "RemoveContainer" containerID="2129598e625a213cd3ba79ba7fbea1e290f821367d76513b2babda344dd0d56d" Jan 05 22:15:53 crc kubenswrapper[4910]: I0105 22:15:53.243689 4910 scope.go:117] "RemoveContainer" containerID="e2ab8a8678a38130f2659e63954e48baae4462647b6604c3ae9b246a148b5a0e" Jan 05 22:15:53 crc kubenswrapper[4910]: I0105 22:15:53.261070 4910 scope.go:117] "RemoveContainer" containerID="0cd732b1f2842a6991bdbdcfda901598d9442e11547c32597338a8fa53a2b375" Jan 05 
22:15:53 crc kubenswrapper[4910]: I0105 22:15:53.279790 4910 scope.go:117] "RemoveContainer" containerID="f2769fedd4f026dd164121600d29619d9faa807462e36ef2df370d00a00de88f" Jan 05 22:15:54 crc kubenswrapper[4910]: I0105 22:15:54.737532 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" path="/var/lib/kubelet/pods/4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c/volumes" Jan 05 22:15:54 crc kubenswrapper[4910]: I0105 22:15:54.741238 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" path="/var/lib/kubelet/pods/780aad6a-41ff-410c-a6fc-6be2faf38b6f/volumes" Jan 05 22:15:54 crc kubenswrapper[4910]: I0105 22:15:54.902168 4910 generic.go:334] "Generic (PLEG): container finished" podID="dc0e5b95-8658-440f-8771-c67a74098057" containerID="bc66ebaca647091cd90204976a51f83ad49f72d78ba556dafe73a5c164210302" exitCode=137 Jan 05 22:15:54 crc kubenswrapper[4910]: I0105 22:15:54.902264 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-d6c5d94b9-llc4f" event={"ID":"dc0e5b95-8658-440f-8771-c67a74098057","Type":"ContainerDied","Data":"bc66ebaca647091cd90204976a51f83ad49f72d78ba556dafe73a5c164210302"} Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.149865 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.220793 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc0e5b95-8658-440f-8771-c67a74098057-logs\") pod \"dc0e5b95-8658-440f-8771-c67a74098057\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.220940 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gc875\" (UniqueName: \"kubernetes.io/projected/dc0e5b95-8658-440f-8771-c67a74098057-kube-api-access-gc875\") pod \"dc0e5b95-8658-440f-8771-c67a74098057\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.221007 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-config-data-custom\") pod \"dc0e5b95-8658-440f-8771-c67a74098057\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.221101 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-config-data\") pod \"dc0e5b95-8658-440f-8771-c67a74098057\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.221155 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-combined-ca-bundle\") pod \"dc0e5b95-8658-440f-8771-c67a74098057\" (UID: \"dc0e5b95-8658-440f-8771-c67a74098057\") " Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.221514 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc0e5b95-8658-440f-8771-c67a74098057-logs" (OuterVolumeSpecName: "logs") pod "dc0e5b95-8658-440f-8771-c67a74098057" (UID: "dc0e5b95-8658-440f-8771-c67a74098057"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.227252 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "dc0e5b95-8658-440f-8771-c67a74098057" (UID: "dc0e5b95-8658-440f-8771-c67a74098057"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.227347 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc0e5b95-8658-440f-8771-c67a74098057-kube-api-access-gc875" (OuterVolumeSpecName: "kube-api-access-gc875") pod "dc0e5b95-8658-440f-8771-c67a74098057" (UID: "dc0e5b95-8658-440f-8771-c67a74098057"). InnerVolumeSpecName "kube-api-access-gc875". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.245699 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dc0e5b95-8658-440f-8771-c67a74098057" (UID: "dc0e5b95-8658-440f-8771-c67a74098057"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.262197 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-config-data" (OuterVolumeSpecName: "config-data") pod "dc0e5b95-8658-440f-8771-c67a74098057" (UID: "dc0e5b95-8658-440f-8771-c67a74098057"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.323388 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.323428 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.323442 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc0e5b95-8658-440f-8771-c67a74098057-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.323457 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gc875\" (UniqueName: \"kubernetes.io/projected/dc0e5b95-8658-440f-8771-c67a74098057-kube-api-access-gc875\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.323467 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dc0e5b95-8658-440f-8771-c67a74098057-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.914395 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-d6c5d94b9-llc4f" event={"ID":"dc0e5b95-8658-440f-8771-c67a74098057","Type":"ContainerDied","Data":"ecf35f7f9d49d337aabbfdf38a5e83601e140aff6e3b0beb4c263e559fcce34f"} Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.914797 4910 scope.go:117] "RemoveContainer" containerID="bc66ebaca647091cd90204976a51f83ad49f72d78ba556dafe73a5c164210302" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.914461 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-d6c5d94b9-llc4f" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.937079 4910 scope.go:117] "RemoveContainer" containerID="dd028682666330bff13245ee6ff70f7e9c71b736d8ba15cebcc6d55a428021f4" Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.953887 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-d6c5d94b9-llc4f"] Jan 05 22:15:55 crc kubenswrapper[4910]: I0105 22:15:55.962444 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-d6c5d94b9-llc4f"] Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.426872 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.542988 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nncgv\" (UniqueName: \"kubernetes.io/projected/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-kube-api-access-nncgv\") pod \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.543181 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-config-data-custom\") pod \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.543259 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-combined-ca-bundle\") pod \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.543289 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-logs\") pod \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.543350 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-config-data\") pod \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\" (UID: \"244c7b09-d3d9-4ae7-864b-ff6758b0de6a\") " Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.543713 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-logs" (OuterVolumeSpecName: "logs") pod "244c7b09-d3d9-4ae7-864b-ff6758b0de6a" (UID: "244c7b09-d3d9-4ae7-864b-ff6758b0de6a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.549032 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-kube-api-access-nncgv" (OuterVolumeSpecName: "kube-api-access-nncgv") pod "244c7b09-d3d9-4ae7-864b-ff6758b0de6a" (UID: "244c7b09-d3d9-4ae7-864b-ff6758b0de6a"). InnerVolumeSpecName "kube-api-access-nncgv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.550990 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "244c7b09-d3d9-4ae7-864b-ff6758b0de6a" (UID: "244c7b09-d3d9-4ae7-864b-ff6758b0de6a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.567381 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "244c7b09-d3d9-4ae7-864b-ff6758b0de6a" (UID: "244c7b09-d3d9-4ae7-864b-ff6758b0de6a"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.582306 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-config-data" (OuterVolumeSpecName: "config-data") pod "244c7b09-d3d9-4ae7-864b-ff6758b0de6a" (UID: "244c7b09-d3d9-4ae7-864b-ff6758b0de6a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.645042 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.645084 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.645094 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-logs\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.645105 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.645114 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nncgv\" (UniqueName: \"kubernetes.io/projected/244c7b09-d3d9-4ae7-864b-ff6758b0de6a-kube-api-access-nncgv\") on node \"crc\" DevicePath \"\"" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.740447 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc0e5b95-8658-440f-8771-c67a74098057" path="/var/lib/kubelet/pods/dc0e5b95-8658-440f-8771-c67a74098057/volumes" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.814294 4910 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod19d63cd6-26c3-439b-a9f6-5a53f27d9e0e"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod19d63cd6-26c3-439b-a9f6-5a53f27d9e0e] : Timed out while waiting for systemd to remove kubepods-besteffort-pod19d63cd6_26c3_439b_a9f6_5a53f27d9e0e.slice" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.933288 4910 generic.go:334] "Generic (PLEG): container finished" podID="244c7b09-d3d9-4ae7-864b-ff6758b0de6a" containerID="54383dd13947802c3b16a778eae08327fa8bdad5b46b80e000c52afe19c07cdc" exitCode=137 Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.933336 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" event={"ID":"244c7b09-d3d9-4ae7-864b-ff6758b0de6a","Type":"ContainerDied","Data":"54383dd13947802c3b16a778eae08327fa8bdad5b46b80e000c52afe19c07cdc"} Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.933365 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" event={"ID":"244c7b09-d3d9-4ae7-864b-ff6758b0de6a","Type":"ContainerDied","Data":"2d3d0b5d3ef3473593653705d6b414ce761afb9c29ef45bc74b7b13a9dca29d4"} Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.933381 4910 scope.go:117] 
"RemoveContainer" containerID="54383dd13947802c3b16a778eae08327fa8bdad5b46b80e000c52afe19c07cdc" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.933469 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-787f96fcd6-44r4b" Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.959497 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-787f96fcd6-44r4b"] Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.967066 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-787f96fcd6-44r4b"] Jan 05 22:15:56 crc kubenswrapper[4910]: I0105 22:15:56.971890 4910 scope.go:117] "RemoveContainer" containerID="5f5f3c9fc2640058d48c86ff68f7c8aa4847482f6977cae797955ee8c5bef11c" Jan 05 22:15:57 crc kubenswrapper[4910]: I0105 22:15:57.000589 4910 scope.go:117] "RemoveContainer" containerID="54383dd13947802c3b16a778eae08327fa8bdad5b46b80e000c52afe19c07cdc" Jan 05 22:15:57 crc kubenswrapper[4910]: E0105 22:15:57.001226 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54383dd13947802c3b16a778eae08327fa8bdad5b46b80e000c52afe19c07cdc\": container with ID starting with 54383dd13947802c3b16a778eae08327fa8bdad5b46b80e000c52afe19c07cdc not found: ID does not exist" containerID="54383dd13947802c3b16a778eae08327fa8bdad5b46b80e000c52afe19c07cdc" Jan 05 22:15:57 crc kubenswrapper[4910]: I0105 22:15:57.001278 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54383dd13947802c3b16a778eae08327fa8bdad5b46b80e000c52afe19c07cdc"} err="failed to get container status \"54383dd13947802c3b16a778eae08327fa8bdad5b46b80e000c52afe19c07cdc\": rpc error: code = NotFound desc = could not find container \"54383dd13947802c3b16a778eae08327fa8bdad5b46b80e000c52afe19c07cdc\": container with ID starting with 54383dd13947802c3b16a778eae08327fa8bdad5b46b80e000c52afe19c07cdc not found: ID does not exist" Jan 05 22:15:57 crc kubenswrapper[4910]: I0105 22:15:57.001309 4910 scope.go:117] "RemoveContainer" containerID="5f5f3c9fc2640058d48c86ff68f7c8aa4847482f6977cae797955ee8c5bef11c" Jan 05 22:15:57 crc kubenswrapper[4910]: E0105 22:15:57.002005 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f5f3c9fc2640058d48c86ff68f7c8aa4847482f6977cae797955ee8c5bef11c\": container with ID starting with 5f5f3c9fc2640058d48c86ff68f7c8aa4847482f6977cae797955ee8c5bef11c not found: ID does not exist" containerID="5f5f3c9fc2640058d48c86ff68f7c8aa4847482f6977cae797955ee8c5bef11c" Jan 05 22:15:57 crc kubenswrapper[4910]: I0105 22:15:57.002031 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f5f3c9fc2640058d48c86ff68f7c8aa4847482f6977cae797955ee8c5bef11c"} err="failed to get container status \"5f5f3c9fc2640058d48c86ff68f7c8aa4847482f6977cae797955ee8c5bef11c\": rpc error: code = NotFound desc = could not find container \"5f5f3c9fc2640058d48c86ff68f7c8aa4847482f6977cae797955ee8c5bef11c\": container with ID starting with 5f5f3c9fc2640058d48c86ff68f7c8aa4847482f6977cae797955ee8c5bef11c not found: ID does not exist" Jan 05 22:15:58 crc kubenswrapper[4910]: I0105 22:15:58.731719 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="244c7b09-d3d9-4ae7-864b-ff6758b0de6a" 
path="/var/lib/kubelet/pods/244c7b09-d3d9-4ae7-864b-ff6758b0de6a/volumes" Jan 05 22:16:40 crc kubenswrapper[4910]: I0105 22:16:40.893269 4910 scope.go:117] "RemoveContainer" containerID="5e1d20fa64749e78dd0848923915f08c98945b04126d6cc3dc10a5ec7b2cb5d2" Jan 05 22:16:40 crc kubenswrapper[4910]: I0105 22:16:40.933828 4910 scope.go:117] "RemoveContainer" containerID="1a0912f44fb9243b157508f54544929bbefbd015fb5658e4e03f1dad4ed2596a" Jan 05 22:16:40 crc kubenswrapper[4910]: I0105 22:16:40.966546 4910 scope.go:117] "RemoveContainer" containerID="c3d8f1f1aad1dd8d9445060302319a5bd2edb7b74f4c04887e27e4076c5c28f8" Jan 05 22:16:41 crc kubenswrapper[4910]: I0105 22:16:41.011880 4910 scope.go:117] "RemoveContainer" containerID="f0d59393ce7eea23f9ee13b4d3b5d8217da3802278d0f455c997ce0a2839f1c6" Jan 05 22:16:41 crc kubenswrapper[4910]: I0105 22:16:41.037228 4910 scope.go:117] "RemoveContainer" containerID="93b46b88e064324b14c7d00a03e5638202dad25e6acbc02574f66c4dabeb04cf" Jan 05 22:16:41 crc kubenswrapper[4910]: I0105 22:16:41.056012 4910 scope.go:117] "RemoveContainer" containerID="205d4b46e7bd413f6db4358dc1c3d1c4cb04c634807f6f47aed239d5fff6aad0" Jan 05 22:16:41 crc kubenswrapper[4910]: I0105 22:16:41.094900 4910 scope.go:117] "RemoveContainer" containerID="99653dd93531799ba7907205bd0f49ec9509e31c306740ccb50671ea23797c38" Jan 05 22:16:41 crc kubenswrapper[4910]: I0105 22:16:41.114846 4910 scope.go:117] "RemoveContainer" containerID="43d64ba74438276a1c444b8b716c3a5435851a1a1454c5658c6993d9ff542c25" Jan 05 22:16:41 crc kubenswrapper[4910]: I0105 22:16:41.135684 4910 scope.go:117] "RemoveContainer" containerID="1bd09319b9d11bdb43c8058ccf667b97b6c47dd45db7f7355efe7383386b8570" Jan 05 22:16:41 crc kubenswrapper[4910]: I0105 22:16:41.154810 4910 scope.go:117] "RemoveContainer" containerID="1f73c1d8d2fa0e648b4c56ae4c50cff94f88b4a2a3829bb9ebdda57b47083e6c" Jan 05 22:16:41 crc kubenswrapper[4910]: I0105 22:16:41.172617 4910 scope.go:117] "RemoveContainer" containerID="4cd7dca0dfe15583bcba23955d426ace279334ab34815807ea04f47c94f20ebd" Jan 05 22:16:41 crc kubenswrapper[4910]: I0105 22:16:41.190739 4910 scope.go:117] "RemoveContainer" containerID="a5b95b626e7d918635b41ca0b7a9ee9cb24d913e67ab2531f8e4eae329e3f286" Jan 05 22:16:41 crc kubenswrapper[4910]: I0105 22:16:41.209236 4910 scope.go:117] "RemoveContainer" containerID="e5c1bbf0050b8e5bf2be2b1af4a1f27408a68c84eca4d7f73bc456d12e9f2191" Jan 05 22:16:41 crc kubenswrapper[4910]: I0105 22:16:41.226322 4910 scope.go:117] "RemoveContainer" containerID="90ac5d36dc8692f0d0c91b15160638e2e13a408a70e90435dcbe90ab33a57fe9" Jan 05 22:16:41 crc kubenswrapper[4910]: I0105 22:16:41.249136 4910 scope.go:117] "RemoveContainer" containerID="5c5f0586368b8c9a50edd5e1d2baab3ab73b86e4b18876168fd793ac4c0ba552" Jan 05 22:16:41 crc kubenswrapper[4910]: I0105 22:16:41.269730 4910 scope.go:117] "RemoveContainer" containerID="3f94cee1613676c311b9e111ce3fc8d4a4347ded3807c06e278897a8efd59845" Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.385834 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lzjch"] Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387058 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de8aafdf-9b35-4c41-8726-6c7e86edee5f" containerName="nova-cell0-conductor-conductor" Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387078 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="de8aafdf-9b35-4c41-8726-6c7e86edee5f" containerName="nova-cell0-conductor-conductor" Jan 05 22:17:15 crc 
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.385834 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lzjch"]
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387058 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de8aafdf-9b35-4c41-8726-6c7e86edee5f" containerName="nova-cell0-conductor-conductor"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387078 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="de8aafdf-9b35-4c41-8726-6c7e86edee5f" containerName="nova-cell0-conductor-conductor"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387100 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-server"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387109 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-server"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387142 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-reaper"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387151 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-reaper"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387166 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83319bb4-7278-49b3-8ef2-beb8baa0a1a6" containerName="nova-scheduler-scheduler"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387174 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="83319bb4-7278-49b3-8ef2-beb8baa0a1a6" containerName="nova-scheduler-scheduler"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387183 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="sg-core"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387191 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="sg-core"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387202 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" containerName="barbican-keystone-listener-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387209 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" containerName="barbican-keystone-listener-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387217 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="ceilometer-notification-agent"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387225 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="ceilometer-notification-agent"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387240 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="swift-recon-cron"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387249 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="swift-recon-cron"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387257 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b29bf6bd-079e-4e8b-bec6-49d4923676af" containerName="placement-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387264 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b29bf6bd-079e-4e8b-bec6-49d4923676af" containerName="placement-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387272 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9cedfb5-8c45-434f-b04d-694bf6d600b8" containerName="setup-container"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387280 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9cedfb5-8c45-434f-b04d-694bf6d600b8" containerName="setup-container"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387291 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f43d30e-14e4-4978-bb02-a251305f9330" containerName="glance-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387299 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f43d30e-14e4-4978-bb02-a251305f9330" containerName="glance-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387312 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-expirer"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387318 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-expirer"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387329 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerName="nova-metadata-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387337 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerName="nova-metadata-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387348 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="227b48c0-2e23-4048-8fb5-21628bd9e5e0" containerName="neutron-httpd"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387357 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="227b48c0-2e23-4048-8fb5-21628bd9e5e0" containerName="neutron-httpd"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387368 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc0e5b95-8658-440f-8771-c67a74098057" containerName="barbican-worker"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387376 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc0e5b95-8658-440f-8771-c67a74098057" containerName="barbican-worker"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387389 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-replicator"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387397 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-replicator"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387409 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="227b48c0-2e23-4048-8fb5-21628bd9e5e0" containerName="neutron-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387416 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="227b48c0-2e23-4048-8fb5-21628bd9e5e0" containerName="neutron-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387426 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce8ea9ec-e799-457a-aaca-e16b591bdf0c" containerName="barbican-worker-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387433 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce8ea9ec-e799-457a-aaca-e16b591bdf0c" containerName="barbican-worker-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387442 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cb18efe-a80d-4657-921d-af4a18ae279d" containerName="mysql-bootstrap"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387450 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cb18efe-a80d-4657-921d-af4a18ae279d" containerName="mysql-bootstrap"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387461 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-auditor"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387469 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-auditor"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387482 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-replicator"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387489 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-replicator"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387502 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-auditor"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387511 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-auditor"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387522 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce8ea9ec-e799-457a-aaca-e16b591bdf0c" containerName="barbican-worker"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387529 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce8ea9ec-e799-457a-aaca-e16b591bdf0c" containerName="barbican-worker"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387537 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cb18efe-a80d-4657-921d-af4a18ae279d" containerName="galera"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387544 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cb18efe-a80d-4657-921d-af4a18ae279d" containerName="galera"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387551 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="rsync"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387558 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="rsync"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387571 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-replicator"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387578 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-replicator"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387587 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-updater"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387599 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-updater"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387611 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf7e2b20-58e5-4c61-9e50-c1af51acf521" containerName="nova-api-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387618 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf7e2b20-58e5-4c61-9e50-c1af51acf521" containerName="nova-api-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387632 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-server"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387640 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-server"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387650 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" containerName="barbican-keystone-listener"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387658 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" containerName="barbican-keystone-listener"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387670 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-auditor"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387677 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-auditor"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387690 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="244c7b09-d3d9-4ae7-864b-ff6758b0de6a" containerName="barbican-keystone-listener"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387697 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="244c7b09-d3d9-4ae7-864b-ff6758b0de6a" containerName="barbican-keystone-listener"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387709 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-server"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387716 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-server"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387726 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerName="nova-metadata-metadata"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387733 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerName="nova-metadata-metadata"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387744 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="244c7b09-d3d9-4ae7-864b-ff6758b0de6a" containerName="barbican-keystone-listener-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387751 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="244c7b09-d3d9-4ae7-864b-ff6758b0de6a" containerName="barbican-keystone-listener-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387760 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b651f520-1463-434f-b16f-edd2b1b8f8d9" containerName="kube-state-metrics"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387768 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b651f520-1463-434f-b16f-edd2b1b8f8d9" containerName="kube-state-metrics"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387777 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f43d30e-14e4-4978-bb02-a251305f9330" containerName="glance-httpd"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387785 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f43d30e-14e4-4978-bb02-a251305f9330" containerName="glance-httpd"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387799 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovsdb-server"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387806 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovsdb-server"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387817 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf7e2b20-58e5-4c61-9e50-c1af51acf521" containerName="nova-api-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387824 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf7e2b20-58e5-4c61-9e50-c1af51acf521" containerName="nova-api-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387834 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97c873ec-c28a-4121-bac2-98b49c6b42a0" containerName="keystone-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387841 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="97c873ec-c28a-4121-bac2-98b49c6b42a0" containerName="keystone-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387854 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-updater"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387861 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-updater"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387868 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70694d65-fa64-4667-b1aa-bac50650687c" containerName="nova-cell1-conductor-conductor"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387877 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="70694d65-fa64-4667-b1aa-bac50650687c" containerName="nova-cell1-conductor-conductor"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387884 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="proxy-httpd"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387891 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="proxy-httpd"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387902 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9cedfb5-8c45-434f-b04d-694bf6d600b8" containerName="rabbitmq"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387910 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9cedfb5-8c45-434f-b04d-694bf6d600b8" containerName="rabbitmq"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387923 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70100901-0709-4900-ac75-462a85b350c3" containerName="glance-httpd"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387932 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="70100901-0709-4900-ac75-462a85b350c3" containerName="glance-httpd"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.387942 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovsdb-server-init"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.387950 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovsdb-server-init"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.388919 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="ceilometer-central-agent"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.388933 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="ceilometer-central-agent"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.388951 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70100901-0709-4900-ac75-462a85b350c3" containerName="glance-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.388958 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="70100901-0709-4900-ac75-462a85b350c3" containerName="glance-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.388967 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e2a3efd-2de7-493e-af91-900b224e5313" containerName="setup-container"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.388975 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e2a3efd-2de7-493e-af91-900b224e5313" containerName="setup-container"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.388984 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b29bf6bd-079e-4e8b-bec6-49d4923676af" containerName="placement-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.388991 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b29bf6bd-079e-4e8b-bec6-49d4923676af" containerName="placement-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.389003 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07efd759-c536-425d-938e-a8ccd41706cd" containerName="cinder-api-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389009 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="07efd759-c536-425d-938e-a8ccd41706cd" containerName="cinder-api-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.389018 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39608078-4c49-4ca6-b9d4-6cdd37d89f91" containerName="memcached"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389025 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="39608078-4c49-4ca6-b9d4-6cdd37d89f91" containerName="memcached"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.389033 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45acd92f-2e5d-4fc1-8b91-c91f165e786a" containerName="barbican-api-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389039 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="45acd92f-2e5d-4fc1-8b91-c91f165e786a" containerName="barbican-api-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.389099 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc0e5b95-8658-440f-8771-c67a74098057" containerName="barbican-worker-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389107 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc0e5b95-8658-440f-8771-c67a74098057" containerName="barbican-worker-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.389132 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovs-vswitchd"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389139 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovs-vswitchd"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.389152 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45acd92f-2e5d-4fc1-8b91-c91f165e786a" containerName="barbican-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389158 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="45acd92f-2e5d-4fc1-8b91-c91f165e786a" containerName="barbican-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.389167 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e2a3efd-2de7-493e-af91-900b224e5313" containerName="rabbitmq"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389175 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e2a3efd-2de7-493e-af91-900b224e5313" containerName="rabbitmq"
Jan 05 22:17:15 crc kubenswrapper[4910]: E0105 22:17:15.389185 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07efd759-c536-425d-938e-a8ccd41706cd" containerName="cinder-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389192 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="07efd759-c536-425d-938e-a8ccd41706cd" containerName="cinder-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389374 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="proxy-httpd"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389393 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-server"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389407 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-auditor"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389421 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="ceilometer-notification-agent"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389435 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="swift-recon-cron"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389444 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerName="nova-metadata-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389457 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovs-vswitchd"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389470 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce8ea9ec-e799-457a-aaca-e16b591bdf0c" containerName="barbican-worker-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389480 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="ceilometer-central-agent"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389491 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" containerName="barbican-keystone-listener-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389505 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-replicator"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389518 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="07efd759-c536-425d-938e-a8ccd41706cd" containerName="cinder-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389529 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="70100901-0709-4900-ac75-462a85b350c3" containerName="glance-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389539 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="780aad6a-41ff-410c-a6fc-6be2faf38b6f" containerName="ovsdb-server"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389551 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-replicator"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389560 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="227b48c0-2e23-4048-8fb5-21628bd9e5e0" containerName="neutron-httpd"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389572 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="97c873ec-c28a-4121-bac2-98b49c6b42a0" containerName="keystone-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389584 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="45acd92f-2e5d-4fc1-8b91-c91f165e786a" containerName="barbican-api-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389596 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="244c7b09-d3d9-4ae7-864b-ff6758b0de6a" containerName="barbican-keystone-listener-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389605 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce8ea9ec-e799-457a-aaca-e16b591bdf0c" containerName="barbican-worker"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389617 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="cafbb8bc-7120-4c64-aa9d-18fe4ce9e58b" containerName="barbican-keystone-listener"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389626 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="07efd759-c536-425d-938e-a8ccd41706cd" containerName="cinder-api-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389634 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="70100901-0709-4900-ac75-462a85b350c3" containerName="glance-httpd"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389642 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cb18efe-a80d-4657-921d-af4a18ae279d" containerName="galera"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389648 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b29bf6bd-079e-4e8b-bec6-49d4923676af" containerName="placement-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389657 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="244c7b09-d3d9-4ae7-864b-ff6758b0de6a" containerName="barbican-keystone-listener"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389670 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9cedfb5-8c45-434f-b04d-694bf6d600b8" containerName="rabbitmq"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389679 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b29bf6bd-079e-4e8b-bec6-49d4923676af" containerName="placement-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389689 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-reaper"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389702 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-expirer"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389712 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3486557d-93f8-44c2-b40a-dd8aca19d8e1" containerName="nova-metadata-metadata"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389722 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="rsync"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389734 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-server"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389741 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b651f520-1463-434f-b16f-edd2b1b8f8d9" containerName="kube-state-metrics"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389752 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d881977-4280-42f6-8ec5-65be97c8dc28" containerName="sg-core"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389762 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f43d30e-14e4-4978-bb02-a251305f9330" containerName="glance-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389771 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f43d30e-14e4-4978-bb02-a251305f9330" containerName="glance-httpd"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389781 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="45acd92f-2e5d-4fc1-8b91-c91f165e786a" containerName="barbican-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389792 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-replicator"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389802 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="83319bb4-7278-49b3-8ef2-beb8baa0a1a6" containerName="nova-scheduler-scheduler"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389814 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="227b48c0-2e23-4048-8fb5-21628bd9e5e0" containerName="neutron-api"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389824 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-updater"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389833 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="70694d65-fa64-4667-b1aa-bac50650687c" containerName="nova-cell1-conductor-conductor"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389846 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-updater"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389857 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="object-server"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389867 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="container-auditor"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389877 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e2a3efd-2de7-493e-af91-900b224e5313" containerName="rabbitmq"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389885 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aa2e70b-9eb7-4ea1-9fdb-2687a340ba9c" containerName="account-auditor"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389898 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc0e5b95-8658-440f-8771-c67a74098057" containerName="barbican-worker-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389909 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="39608078-4c49-4ca6-b9d4-6cdd37d89f91" containerName="memcached"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389918 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="de8aafdf-9b35-4c41-8726-6c7e86edee5f" containerName="nova-cell0-conductor-conductor"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389928 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc0e5b95-8658-440f-8771-c67a74098057" containerName="barbican-worker"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389938 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf7e2b20-58e5-4c61-9e50-c1af51acf521" containerName="nova-api-log"
Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.389948 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf7e2b20-58e5-4c61-9e50-c1af51acf521" containerName="nova-api-api"
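The long cpu_manager/state_mem/memory_manager run above is RemoveStaleState at work: admitting the new pod (the "SyncLoop ADD" for community-operators-lzjch) makes the CPU and memory managers drop per-container assignments checkpointed for pods that no longer exist, such as the OpenStack pods torn down earlier. The E-severity lines are noisy but harmless; they only record that a stale assignment was found and removed. The CPU manager checkpoint lives on disk; a minimal reader (assuming the default path /var/lib/kubelet/cpu_manager_state; field names can vary across kubelet versions):

import json

# Dump the CPU manager checkpoint; "entries" maps pod UID -> container -> cpuset.
with open("/var/lib/kubelet/cpu_manager_state") as f:
    state = json.load(f)
print("policy:", state.get("policyName"))
print("default cpuset:", state.get("defaultCpuSet"))
for pod_uid, containers in state.get("entries", {}).items():
    for name, cpus in containers.items():
        print(pod_uid, name, cpus)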
Need to start a new one" pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.400934 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lzjch"] Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.474378 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc8751fc-4055-4de4-ae5b-d9a523a01abc-catalog-content\") pod \"community-operators-lzjch\" (UID: \"dc8751fc-4055-4de4-ae5b-d9a523a01abc\") " pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.474472 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lm6n\" (UniqueName: \"kubernetes.io/projected/dc8751fc-4055-4de4-ae5b-d9a523a01abc-kube-api-access-9lm6n\") pod \"community-operators-lzjch\" (UID: \"dc8751fc-4055-4de4-ae5b-d9a523a01abc\") " pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.474714 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc8751fc-4055-4de4-ae5b-d9a523a01abc-utilities\") pod \"community-operators-lzjch\" (UID: \"dc8751fc-4055-4de4-ae5b-d9a523a01abc\") " pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.576785 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc8751fc-4055-4de4-ae5b-d9a523a01abc-catalog-content\") pod \"community-operators-lzjch\" (UID: \"dc8751fc-4055-4de4-ae5b-d9a523a01abc\") " pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.576877 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lm6n\" (UniqueName: \"kubernetes.io/projected/dc8751fc-4055-4de4-ae5b-d9a523a01abc-kube-api-access-9lm6n\") pod \"community-operators-lzjch\" (UID: \"dc8751fc-4055-4de4-ae5b-d9a523a01abc\") " pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.576913 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc8751fc-4055-4de4-ae5b-d9a523a01abc-utilities\") pod \"community-operators-lzjch\" (UID: \"dc8751fc-4055-4de4-ae5b-d9a523a01abc\") " pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.577239 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc8751fc-4055-4de4-ae5b-d9a523a01abc-catalog-content\") pod \"community-operators-lzjch\" (UID: \"dc8751fc-4055-4de4-ae5b-d9a523a01abc\") " pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.577268 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc8751fc-4055-4de4-ae5b-d9a523a01abc-utilities\") pod \"community-operators-lzjch\" (UID: \"dc8751fc-4055-4de4-ae5b-d9a523a01abc\") " pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.595399 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9lm6n\" (UniqueName: \"kubernetes.io/projected/dc8751fc-4055-4de4-ae5b-d9a523a01abc-kube-api-access-9lm6n\") pod \"community-operators-lzjch\" (UID: \"dc8751fc-4055-4de4-ae5b-d9a523a01abc\") " pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:15 crc kubenswrapper[4910]: I0105 22:17:15.716081 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:16 crc kubenswrapper[4910]: I0105 22:17:16.217509 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lzjch"] Jan 05 22:17:17 crc kubenswrapper[4910]: I0105 22:17:17.120050 4910 generic.go:334] "Generic (PLEG): container finished" podID="dc8751fc-4055-4de4-ae5b-d9a523a01abc" containerID="7142697bde5bbc0a9f440a34cc69d3e0d225af1eae7d2e582b318144fcaa10a0" exitCode=0 Jan 05 22:17:17 crc kubenswrapper[4910]: I0105 22:17:17.120153 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lzjch" event={"ID":"dc8751fc-4055-4de4-ae5b-d9a523a01abc","Type":"ContainerDied","Data":"7142697bde5bbc0a9f440a34cc69d3e0d225af1eae7d2e582b318144fcaa10a0"} Jan 05 22:17:17 crc kubenswrapper[4910]: I0105 22:17:17.120556 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lzjch" event={"ID":"dc8751fc-4055-4de4-ae5b-d9a523a01abc","Type":"ContainerStarted","Data":"d451a87bde5e80379dd10bb5f90e89f4da57cc91180055c7c20470a3a354448c"} Jan 05 22:17:19 crc kubenswrapper[4910]: I0105 22:17:19.139017 4910 generic.go:334] "Generic (PLEG): container finished" podID="dc8751fc-4055-4de4-ae5b-d9a523a01abc" containerID="d528f524648d54b6efd6927e23aac54fa5c2ac5f6707649f4ebb5f48996da133" exitCode=0 Jan 05 22:17:19 crc kubenswrapper[4910]: I0105 22:17:19.139113 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lzjch" event={"ID":"dc8751fc-4055-4de4-ae5b-d9a523a01abc","Type":"ContainerDied","Data":"d528f524648d54b6efd6927e23aac54fa5c2ac5f6707649f4ebb5f48996da133"} Jan 05 22:17:20 crc kubenswrapper[4910]: I0105 22:17:20.151018 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lzjch" event={"ID":"dc8751fc-4055-4de4-ae5b-d9a523a01abc","Type":"ContainerStarted","Data":"7c35139294b115454bb4ce40ed53494e1605b5c2360131b12ba7eb152c18778f"} Jan 05 22:17:25 crc kubenswrapper[4910]: I0105 22:17:25.717236 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:25 crc kubenswrapper[4910]: I0105 22:17:25.717914 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:25 crc kubenswrapper[4910]: I0105 22:17:25.761491 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:25 crc kubenswrapper[4910]: I0105 22:17:25.785212 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lzjch" podStartSLOduration=8.356664469 podStartE2EDuration="10.785192621s" podCreationTimestamp="2026-01-05 22:17:15 +0000 UTC" firstStartedPulling="2026-01-05 22:17:17.123191643 +0000 UTC m=+1568.700689313" lastFinishedPulling="2026-01-05 22:17:19.551719805 +0000 UTC m=+1571.129217465" observedRunningTime="2026-01-05 
Jan 05 22:17:26 crc kubenswrapper[4910]: I0105 22:17:26.252475 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lzjch"
Jan 05 22:17:26 crc kubenswrapper[4910]: I0105 22:17:26.296165 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lzjch"]
Jan 05 22:17:28 crc kubenswrapper[4910]: I0105 22:17:28.226938 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lzjch" podUID="dc8751fc-4055-4de4-ae5b-d9a523a01abc" containerName="registry-server" containerID="cri-o://7c35139294b115454bb4ce40ed53494e1605b5c2360131b12ba7eb152c18778f" gracePeriod=2
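gracePeriod=2 above is the pod's termination grace period: the runtime sends the registry-server process SIGTERM, waits up to two seconds, then escalates to SIGKILL; here the container exits in time, as the ContainerDied event below shows. A rough illustration of the escalation (not kubelet or CRI-O code, just the shape of the mechanism):

import os
import signal
import time

def kill_with_grace(pid, grace=2.0):
    """SIGTERM, wait up to `grace` seconds, then SIGKILL."""
    os.kill(pid, signal.SIGTERM)
    deadline = time.monotonic() + grace
    while time.monotonic() < deadline:
        try:
            os.kill(pid, 0)          # signal 0: just check the process exists
        except ProcessLookupError:
            return True              # exited within the grace period
        time.sleep(0.05)
    os.kill(pid, signal.SIGKILL)     # grace expired, force-kill
    return False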
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:17:29 crc kubenswrapper[4910]: I0105 22:17:29.838785 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc8751fc-4055-4de4-ae5b-d9a523a01abc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dc8751fc-4055-4de4-ae5b-d9a523a01abc" (UID: "dc8751fc-4055-4de4-ae5b-d9a523a01abc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:17:29 crc kubenswrapper[4910]: I0105 22:17:29.887205 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc8751fc-4055-4de4-ae5b-d9a523a01abc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:17:29 crc kubenswrapper[4910]: I0105 22:17:29.887237 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc8751fc-4055-4de4-ae5b-d9a523a01abc-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:17:29 crc kubenswrapper[4910]: I0105 22:17:29.887252 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9lm6n\" (UniqueName: \"kubernetes.io/projected/dc8751fc-4055-4de4-ae5b-d9a523a01abc-kube-api-access-9lm6n\") on node \"crc\" DevicePath \"\"" Jan 05 22:17:30 crc kubenswrapper[4910]: I0105 22:17:30.246802 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lzjch" event={"ID":"dc8751fc-4055-4de4-ae5b-d9a523a01abc","Type":"ContainerDied","Data":"d451a87bde5e80379dd10bb5f90e89f4da57cc91180055c7c20470a3a354448c"} Jan 05 22:17:30 crc kubenswrapper[4910]: I0105 22:17:30.246884 4910 scope.go:117] "RemoveContainer" containerID="7c35139294b115454bb4ce40ed53494e1605b5c2360131b12ba7eb152c18778f" Jan 05 22:17:30 crc kubenswrapper[4910]: I0105 22:17:30.247007 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lzjch" Jan 05 22:17:30 crc kubenswrapper[4910]: I0105 22:17:30.273691 4910 scope.go:117] "RemoveContainer" containerID="d528f524648d54b6efd6927e23aac54fa5c2ac5f6707649f4ebb5f48996da133" Jan 05 22:17:30 crc kubenswrapper[4910]: I0105 22:17:30.306077 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lzjch"] Jan 05 22:17:30 crc kubenswrapper[4910]: I0105 22:17:30.314227 4910 scope.go:117] "RemoveContainer" containerID="7142697bde5bbc0a9f440a34cc69d3e0d225af1eae7d2e582b318144fcaa10a0" Jan 05 22:17:30 crc kubenswrapper[4910]: I0105 22:17:30.314694 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lzjch"] Jan 05 22:17:30 crc kubenswrapper[4910]: I0105 22:17:30.733072 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc8751fc-4055-4de4-ae5b-d9a523a01abc" path="/var/lib/kubelet/pods/dc8751fc-4055-4de4-ae5b-d9a523a01abc/volumes" Jan 05 22:17:41 crc kubenswrapper[4910]: I0105 22:17:41.593384 4910 scope.go:117] "RemoveContainer" containerID="4f574442341e268414fe2cd5561245804e6af6ccc1e963a2268387892e7fe7fd" Jan 05 22:17:41 crc kubenswrapper[4910]: I0105 22:17:41.627227 4910 scope.go:117] "RemoveContainer" containerID="04739ac8541b963a572f27c473632741c9601a8fcf8c88e6bb84e5530fcc2531" Jan 05 22:17:41 crc kubenswrapper[4910]: I0105 22:17:41.671695 4910 scope.go:117] "RemoveContainer" containerID="ca4ab0b3e97c3a0c6d935237ab326c6797dbb8d63f989b52bb60164c52dd940e" Jan 05 22:17:41 crc kubenswrapper[4910]: I0105 22:17:41.714000 4910 scope.go:117] "RemoveContainer" containerID="5966fa5734c3ed51558c3d877b4b5b45ed7556cf861a0f54ae98be8bd16f7a20" Jan 05 22:17:41 crc kubenswrapper[4910]: I0105 22:17:41.740112 4910 scope.go:117] "RemoveContainer" containerID="f0973ba8f153769879aba399ae8990d5d2d00746e48c5db9253cff5df721d6f9" Jan 05 22:17:41 crc kubenswrapper[4910]: I0105 22:17:41.788612 4910 scope.go:117] "RemoveContainer" containerID="b79059d44414a4899f09fc0f78288c60bf77b33617f459df2cbbbe3c43f950e9" Jan 05 22:17:41 crc kubenswrapper[4910]: I0105 22:17:41.811307 4910 scope.go:117] "RemoveContainer" containerID="57959d13503bb849c6442bdf3d6f0c3ed65925527985e5fdecd977b087aed4b3" Jan 05 22:18:10 crc kubenswrapper[4910]: I0105 22:18:10.952868 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:18:10 crc kubenswrapper[4910]: I0105 22:18:10.953621 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.607239 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lnbp7"] Jan 05 22:18:35 crc kubenswrapper[4910]: E0105 22:18:35.608498 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc8751fc-4055-4de4-ae5b-d9a523a01abc" containerName="registry-server" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.608511 4910 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="dc8751fc-4055-4de4-ae5b-d9a523a01abc" containerName="registry-server" Jan 05 22:18:35 crc kubenswrapper[4910]: E0105 22:18:35.608521 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc8751fc-4055-4de4-ae5b-d9a523a01abc" containerName="extract-utilities" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.608527 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc8751fc-4055-4de4-ae5b-d9a523a01abc" containerName="extract-utilities" Jan 05 22:18:35 crc kubenswrapper[4910]: E0105 22:18:35.608538 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc8751fc-4055-4de4-ae5b-d9a523a01abc" containerName="extract-content" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.608544 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc8751fc-4055-4de4-ae5b-d9a523a01abc" containerName="extract-content" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.608702 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc8751fc-4055-4de4-ae5b-d9a523a01abc" containerName="registry-server" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.609769 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.638048 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lnbp7"] Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.705266 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/846e425a-c172-4c09-a839-f83b433f12ed-utilities\") pod \"certified-operators-lnbp7\" (UID: \"846e425a-c172-4c09-a839-f83b433f12ed\") " pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.705367 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtbfn\" (UniqueName: \"kubernetes.io/projected/846e425a-c172-4c09-a839-f83b433f12ed-kube-api-access-dtbfn\") pod \"certified-operators-lnbp7\" (UID: \"846e425a-c172-4c09-a839-f83b433f12ed\") " pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.705424 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/846e425a-c172-4c09-a839-f83b433f12ed-catalog-content\") pod \"certified-operators-lnbp7\" (UID: \"846e425a-c172-4c09-a839-f83b433f12ed\") " pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.806607 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtbfn\" (UniqueName: \"kubernetes.io/projected/846e425a-c172-4c09-a839-f83b433f12ed-kube-api-access-dtbfn\") pod \"certified-operators-lnbp7\" (UID: \"846e425a-c172-4c09-a839-f83b433f12ed\") " pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.806660 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/846e425a-c172-4c09-a839-f83b433f12ed-catalog-content\") pod \"certified-operators-lnbp7\" (UID: \"846e425a-c172-4c09-a839-f83b433f12ed\") " pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 
22:18:35.806744 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/846e425a-c172-4c09-a839-f83b433f12ed-utilities\") pod \"certified-operators-lnbp7\" (UID: \"846e425a-c172-4c09-a839-f83b433f12ed\") " pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.807821 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/846e425a-c172-4c09-a839-f83b433f12ed-utilities\") pod \"certified-operators-lnbp7\" (UID: \"846e425a-c172-4c09-a839-f83b433f12ed\") " pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.808010 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/846e425a-c172-4c09-a839-f83b433f12ed-catalog-content\") pod \"certified-operators-lnbp7\" (UID: \"846e425a-c172-4c09-a839-f83b433f12ed\") " pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.842025 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtbfn\" (UniqueName: \"kubernetes.io/projected/846e425a-c172-4c09-a839-f83b433f12ed-kube-api-access-dtbfn\") pod \"certified-operators-lnbp7\" (UID: \"846e425a-c172-4c09-a839-f83b433f12ed\") " pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:35 crc kubenswrapper[4910]: I0105 22:18:35.951100 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:36 crc kubenswrapper[4910]: I0105 22:18:36.223719 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lnbp7"] Jan 05 22:18:36 crc kubenswrapper[4910]: I0105 22:18:36.762058 4910 generic.go:334] "Generic (PLEG): container finished" podID="846e425a-c172-4c09-a839-f83b433f12ed" containerID="ff137228d366839da3df28a5e7b31b66a03ebc331c98e20eb725080a0f9aa6ba" exitCode=0 Jan 05 22:18:36 crc kubenswrapper[4910]: I0105 22:18:36.762175 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnbp7" event={"ID":"846e425a-c172-4c09-a839-f83b433f12ed","Type":"ContainerDied","Data":"ff137228d366839da3df28a5e7b31b66a03ebc331c98e20eb725080a0f9aa6ba"} Jan 05 22:18:36 crc kubenswrapper[4910]: I0105 22:18:36.762536 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnbp7" event={"ID":"846e425a-c172-4c09-a839-f83b433f12ed","Type":"ContainerStarted","Data":"81304b28780b0c9d9746a945d277c81e77fa56ed460a8f5082f60cf0b575aef1"} Jan 05 22:18:36 crc kubenswrapper[4910]: I0105 22:18:36.764442 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 22:18:37 crc kubenswrapper[4910]: I0105 22:18:37.771910 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnbp7" event={"ID":"846e425a-c172-4c09-a839-f83b433f12ed","Type":"ContainerStarted","Data":"c91c5f3940f1921e1f436db59030120d4ae961b5b310f6af8741e61f07825b9c"} Jan 05 22:18:38 crc kubenswrapper[4910]: I0105 22:18:38.780919 4910 generic.go:334] "Generic (PLEG): container finished" podID="846e425a-c172-4c09-a839-f83b433f12ed" containerID="c91c5f3940f1921e1f436db59030120d4ae961b5b310f6af8741e61f07825b9c" exitCode=0 Jan 05 22:18:38 crc 
kubenswrapper[4910]: I0105 22:18:38.780996 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnbp7" event={"ID":"846e425a-c172-4c09-a839-f83b433f12ed","Type":"ContainerDied","Data":"c91c5f3940f1921e1f436db59030120d4ae961b5b310f6af8741e61f07825b9c"} Jan 05 22:18:39 crc kubenswrapper[4910]: I0105 22:18:39.792495 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnbp7" event={"ID":"846e425a-c172-4c09-a839-f83b433f12ed","Type":"ContainerStarted","Data":"59e8c0e529ec2f06db5629af33de0d3af85f494ca3dd6fe9b6c31150857e7530"} Jan 05 22:18:39 crc kubenswrapper[4910]: I0105 22:18:39.815389 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lnbp7" podStartSLOduration=2.287713334 podStartE2EDuration="4.815371679s" podCreationTimestamp="2026-01-05 22:18:35 +0000 UTC" firstStartedPulling="2026-01-05 22:18:36.764164645 +0000 UTC m=+1648.341662315" lastFinishedPulling="2026-01-05 22:18:39.29182299 +0000 UTC m=+1650.869320660" observedRunningTime="2026-01-05 22:18:39.811739969 +0000 UTC m=+1651.389237649" watchObservedRunningTime="2026-01-05 22:18:39.815371679 +0000 UTC m=+1651.392869349" Jan 05 22:18:40 crc kubenswrapper[4910]: I0105 22:18:40.952925 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:18:40 crc kubenswrapper[4910]: I0105 22:18:40.953502 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:18:41 crc kubenswrapper[4910]: I0105 22:18:41.949111 4910 scope.go:117] "RemoveContainer" containerID="44506650ad4a40575aa61344dc4d96523c508c48166a6845b6b1d2011a42b387" Jan 05 22:18:41 crc kubenswrapper[4910]: I0105 22:18:41.972398 4910 scope.go:117] "RemoveContainer" containerID="6daa2eb7900c845da95b4889f00144bf520b49eeafeeefc6d62129f8760b3df1" Jan 05 22:18:42 crc kubenswrapper[4910]: I0105 22:18:42.035292 4910 scope.go:117] "RemoveContainer" containerID="9e5bad872d1ceb46b26c5dc21dec8556316d8cada45123a1f5bd1c291685e9f4" Jan 05 22:18:42 crc kubenswrapper[4910]: I0105 22:18:42.086257 4910 scope.go:117] "RemoveContainer" containerID="a0e248b48425380302b1988bb335f1102fb9d344cce326d7af9e5dd2f6475bc5" Jan 05 22:18:45 crc kubenswrapper[4910]: I0105 22:18:45.952306 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:45 crc kubenswrapper[4910]: I0105 22:18:45.952807 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:46 crc kubenswrapper[4910]: I0105 22:18:46.006100 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:46 crc kubenswrapper[4910]: I0105 22:18:46.880307 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:46 crc kubenswrapper[4910]: I0105 22:18:46.927759 4910 
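Note: the podStartSLOduration bookkeeping above is plain timestamp arithmetic: end-to-end startup is watchObservedRunningTime minus podCreationTimestamp, and the SLO figure subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling). A back-of-the-envelope check in Go using the certified-operators-lnbp7 timestamps from the entry above; the helper names are ours, not kubelet's:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	parse := func(s string) time.Time {
    		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
    		if err != nil {
    			panic(err)
    		}
    		return t
    	}
    	created := parse("2026-01-05 22:18:35 +0000 UTC")
    	firstPull := parse("2026-01-05 22:18:36.764164645 +0000 UTC")
    	lastPull := parse("2026-01-05 22:18:39.29182299 +0000 UTC")
    	watch := parse("2026-01-05 22:18:39.815371679 +0000 UTC")

    	e2e := watch.Sub(created)         // 4.815371679s = podStartE2EDuration
    	pull := lastPull.Sub(firstPull)   // 2.527658345s spent pulling images
    	fmt.Println(e2e, pull, e2e-pull)  // e2e-pull = 2.287713334s = podStartSLOduration
    }

The numbers reproduce the logged values exactly, which is a handy sanity check when reading these latency-tracker lines.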
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lnbp7"] Jan 05 22:18:48 crc kubenswrapper[4910]: I0105 22:18:48.855366 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lnbp7" podUID="846e425a-c172-4c09-a839-f83b433f12ed" containerName="registry-server" containerID="cri-o://59e8c0e529ec2f06db5629af33de0d3af85f494ca3dd6fe9b6c31150857e7530" gracePeriod=2 Jan 05 22:18:49 crc kubenswrapper[4910]: I0105 22:18:49.863591 4910 generic.go:334] "Generic (PLEG): container finished" podID="846e425a-c172-4c09-a839-f83b433f12ed" containerID="59e8c0e529ec2f06db5629af33de0d3af85f494ca3dd6fe9b6c31150857e7530" exitCode=0 Jan 05 22:18:49 crc kubenswrapper[4910]: I0105 22:18:49.863639 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnbp7" event={"ID":"846e425a-c172-4c09-a839-f83b433f12ed","Type":"ContainerDied","Data":"59e8c0e529ec2f06db5629af33de0d3af85f494ca3dd6fe9b6c31150857e7530"} Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.200535 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.347914 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/846e425a-c172-4c09-a839-f83b433f12ed-catalog-content\") pod \"846e425a-c172-4c09-a839-f83b433f12ed\" (UID: \"846e425a-c172-4c09-a839-f83b433f12ed\") " Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.348053 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtbfn\" (UniqueName: \"kubernetes.io/projected/846e425a-c172-4c09-a839-f83b433f12ed-kube-api-access-dtbfn\") pod \"846e425a-c172-4c09-a839-f83b433f12ed\" (UID: \"846e425a-c172-4c09-a839-f83b433f12ed\") " Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.349027 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/846e425a-c172-4c09-a839-f83b433f12ed-utilities\") pod \"846e425a-c172-4c09-a839-f83b433f12ed\" (UID: \"846e425a-c172-4c09-a839-f83b433f12ed\") " Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.349834 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/846e425a-c172-4c09-a839-f83b433f12ed-utilities" (OuterVolumeSpecName: "utilities") pod "846e425a-c172-4c09-a839-f83b433f12ed" (UID: "846e425a-c172-4c09-a839-f83b433f12ed"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.354286 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/846e425a-c172-4c09-a839-f83b433f12ed-kube-api-access-dtbfn" (OuterVolumeSpecName: "kube-api-access-dtbfn") pod "846e425a-c172-4c09-a839-f83b433f12ed" (UID: "846e425a-c172-4c09-a839-f83b433f12ed"). InnerVolumeSpecName "kube-api-access-dtbfn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.399394 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/846e425a-c172-4c09-a839-f83b433f12ed-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "846e425a-c172-4c09-a839-f83b433f12ed" (UID: "846e425a-c172-4c09-a839-f83b433f12ed"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.451449 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/846e425a-c172-4c09-a839-f83b433f12ed-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.451486 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtbfn\" (UniqueName: \"kubernetes.io/projected/846e425a-c172-4c09-a839-f83b433f12ed-kube-api-access-dtbfn\") on node \"crc\" DevicePath \"\"" Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.451499 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/846e425a-c172-4c09-a839-f83b433f12ed-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.874112 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnbp7" event={"ID":"846e425a-c172-4c09-a839-f83b433f12ed","Type":"ContainerDied","Data":"81304b28780b0c9d9746a945d277c81e77fa56ed460a8f5082f60cf0b575aef1"} Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.874188 4910 scope.go:117] "RemoveContainer" containerID="59e8c0e529ec2f06db5629af33de0d3af85f494ca3dd6fe9b6c31150857e7530" Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.874188 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lnbp7" Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.899609 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lnbp7"] Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.899724 4910 scope.go:117] "RemoveContainer" containerID="c91c5f3940f1921e1f436db59030120d4ae961b5b310f6af8741e61f07825b9c" Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.907835 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lnbp7"] Jan 05 22:18:50 crc kubenswrapper[4910]: I0105 22:18:50.954423 4910 scope.go:117] "RemoveContainer" containerID="ff137228d366839da3df28a5e7b31b66a03ebc331c98e20eb725080a0f9aa6ba" Jan 05 22:18:52 crc kubenswrapper[4910]: I0105 22:18:52.732387 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="846e425a-c172-4c09-a839-f83b433f12ed" path="/var/lib/kubelet/pods/846e425a-c172-4c09-a839-f83b433f12ed/volumes" Jan 05 22:19:10 crc kubenswrapper[4910]: I0105 22:19:10.952953 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:19:10 crc kubenswrapper[4910]: I0105 22:19:10.953789 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:19:10 crc kubenswrapper[4910]: I0105 22:19:10.953846 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 22:19:11 crc kubenswrapper[4910]: I0105 22:19:11.041542 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 22:19:11 crc kubenswrapper[4910]: I0105 22:19:11.041650 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43" gracePeriod=600 Jan 05 22:19:11 crc kubenswrapper[4910]: E0105 22:19:11.160961 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:19:12 crc kubenswrapper[4910]: I0105 22:19:12.052163 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43" 
exitCode=0 Jan 05 22:19:12 crc kubenswrapper[4910]: I0105 22:19:12.052229 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"} Jan 05 22:19:12 crc kubenswrapper[4910]: I0105 22:19:12.052292 4910 scope.go:117] "RemoveContainer" containerID="3c994ce088089ca2a9dc19bf92bc43649f3bc30178471fa64d55a2db65d9d2ab" Jan 05 22:19:12 crc kubenswrapper[4910]: I0105 22:19:12.053036 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43" Jan 05 22:19:12 crc kubenswrapper[4910]: E0105 22:19:12.053334 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:19:23 crc kubenswrapper[4910]: I0105 22:19:23.721463 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43" Jan 05 22:19:23 crc kubenswrapper[4910]: E0105 22:19:23.722366 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:19:34 crc kubenswrapper[4910]: I0105 22:19:34.721562 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43" Jan 05 22:19:34 crc kubenswrapper[4910]: E0105 22:19:34.722550 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:19:42 crc kubenswrapper[4910]: I0105 22:19:42.153085 4910 scope.go:117] "RemoveContainer" containerID="2c488eda0d2dfa5be7f48895c6bc7e782d4dd9655987f5f376f5c2c6127ec222" Jan 05 22:19:42 crc kubenswrapper[4910]: I0105 22:19:42.182633 4910 scope.go:117] "RemoveContainer" containerID="7d5136f4f6ce7105da26c27dfc52656f8ab2a759bcfaccf2d8f969116e9751d5" Jan 05 22:19:42 crc kubenswrapper[4910]: I0105 22:19:42.230877 4910 scope.go:117] "RemoveContainer" containerID="1352af66477e9c4c651d53fee72ab2422bf9224b47dd0d348acdbc01b60ee9ae" Jan 05 22:19:42 crc kubenswrapper[4910]: I0105 22:19:42.250215 4910 scope.go:117] "RemoveContainer" containerID="145b76ee96ce39fface07ddb7e7426e99956691dcfb26b5588c0c4adce94f7d5" Jan 05 22:19:42 crc kubenswrapper[4910]: I0105 22:19:42.271604 4910 scope.go:117] "RemoveContainer" containerID="b7619a936c7a930eb76cafc76fa049a86a194b4efb240e66cd9d1ddb9c037037" Jan 05 22:19:42 crc kubenswrapper[4910]: I0105 22:19:42.325751 4910 scope.go:117] "RemoveContainer" 
containerID="5e825dd38b907536857a97445f1e68ba17937cbaba208d22383b08c67caa5ac5" Jan 05 22:19:42 crc kubenswrapper[4910]: I0105 22:19:42.365629 4910 scope.go:117] "RemoveContainer" containerID="50328ae4a92fd73ed7c1a91c0b9f3e496e9daedafb9457a9a7502901363c23bf" Jan 05 22:19:42 crc kubenswrapper[4910]: I0105 22:19:42.383442 4910 scope.go:117] "RemoveContainer" containerID="e76acc70d2f3c0357d76eabf5265baa098e43cc6676d8f72632539c98f35b12e" Jan 05 22:19:42 crc kubenswrapper[4910]: I0105 22:19:42.400903 4910 scope.go:117] "RemoveContainer" containerID="cae8bd0cb87895a687cbd5f20d318700417d702fab40faf85596436cd24929d1" Jan 05 22:19:45 crc kubenswrapper[4910]: I0105 22:19:45.722291 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43" Jan 05 22:19:45 crc kubenswrapper[4910]: E0105 22:19:45.722827 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:20:00 crc kubenswrapper[4910]: I0105 22:20:00.721814 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43" Jan 05 22:20:00 crc kubenswrapper[4910]: E0105 22:20:00.724411 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:20:13 crc kubenswrapper[4910]: I0105 22:20:13.721294 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43" Jan 05 22:20:13 crc kubenswrapper[4910]: E0105 22:20:13.722275 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:20:28 crc kubenswrapper[4910]: I0105 22:20:28.728610 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43" Jan 05 22:20:28 crc kubenswrapper[4910]: E0105 22:20:28.729522 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:20:39 crc kubenswrapper[4910]: I0105 22:20:39.721497 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43" Jan 05 22:20:39 crc kubenswrapper[4910]: E0105 22:20:39.723359 4910 
Jan 05 22:20:39 crc kubenswrapper[4910]: I0105 22:20:39.721497 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:20:39 crc kubenswrapper[4910]: E0105 22:20:39.723359 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:20:42 crc kubenswrapper[4910]: I0105 22:20:42.551646 4910 scope.go:117] "RemoveContainer" containerID="10869218f49d1497aa8b4413fabc4bce2443981d8eca4525be5a59927d56345d"
Jan 05 22:20:42 crc kubenswrapper[4910]: I0105 22:20:42.573372 4910 scope.go:117] "RemoveContainer" containerID="a15750a24f3b614dbf189a0ea0b3ef39ab10388dffcb53a3c89703b0c4d6c6f7"
Jan 05 22:20:42 crc kubenswrapper[4910]: I0105 22:20:42.618117 4910 scope.go:117] "RemoveContainer" containerID="e752583795fe468de9af6eb52c315e5c49f57548b9b236bdb28a3bb75692ebb7"
Jan 05 22:20:42 crc kubenswrapper[4910]: I0105 22:20:42.655969 4910 scope.go:117] "RemoveContainer" containerID="05e89eb9ca56e3ebe59045f592314e58faf89eea75b4fe0a9ff2a77177a668a3"
Jan 05 22:20:42 crc kubenswrapper[4910]: I0105 22:20:42.683210 4910 scope.go:117] "RemoveContainer" containerID="708b16276678b2822ae86c9c52e58e344dbcf830fd5f034e5d7cb53f881b9997"
Jan 05 22:20:51 crc kubenswrapper[4910]: I0105 22:20:51.722864 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:20:51 crc kubenswrapper[4910]: E0105 22:20:51.723890 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:21:04 crc kubenswrapper[4910]: I0105 22:21:04.722659 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:21:04 crc kubenswrapper[4910]: E0105 22:21:04.723628 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:21:19 crc kubenswrapper[4910]: I0105 22:21:19.721544 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:21:19 crc kubenswrapper[4910]: E0105 22:21:19.722888 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:21:33 crc kubenswrapper[4910]: I0105 22:21:33.721358 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:21:33 crc kubenswrapper[4910]: E0105 22:21:33.723626 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:21:48 crc kubenswrapper[4910]: I0105 22:21:48.729794 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:21:48 crc kubenswrapper[4910]: E0105 22:21:48.730839 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:22:03 crc kubenswrapper[4910]: I0105 22:22:03.721705 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:22:03 crc kubenswrapper[4910]: E0105 22:22:03.722950 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:22:17 crc kubenswrapper[4910]: I0105 22:22:17.721547 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:22:17 crc kubenswrapper[4910]: E0105 22:22:17.722326 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:22:32 crc kubenswrapper[4910]: I0105 22:22:32.722298 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:22:32 crc kubenswrapper[4910]: E0105 22:22:32.723475 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:22:46 crc kubenswrapper[4910]: I0105 22:22:46.721507 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:22:46 crc kubenswrapper[4910]: E0105 22:22:46.722082 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:22:59 crc kubenswrapper[4910]: I0105 22:22:59.722010 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:22:59 crc kubenswrapper[4910]: E0105 22:22:59.722820 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:23:13 crc kubenswrapper[4910]: I0105 22:23:13.722544 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:23:13 crc kubenswrapper[4910]: E0105 22:23:13.723800 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:23:24 crc kubenswrapper[4910]: I0105 22:23:24.721665 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:23:24 crc kubenswrapper[4910]: E0105 22:23:24.722563 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:23:35 crc kubenswrapper[4910]: I0105 22:23:35.722238 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:23:35 crc kubenswrapper[4910]: E0105 22:23:35.723278 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:23:49 crc kubenswrapper[4910]: I0105 22:23:49.721206 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:23:49 crc kubenswrapper[4910]: E0105 22:23:49.721889 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:24:00 crc kubenswrapper[4910]: I0105 22:24:00.721848 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:24:00 crc kubenswrapper[4910]: E0105 22:24:00.722821 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:24:13 crc kubenswrapper[4910]: I0105 22:24:13.721762 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43"
Jan 05 22:24:14 crc kubenswrapper[4910]: I0105 22:24:14.696642 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"d8056f50a258b26e2d83ce053ae1afe592e18b3df0dfc6cb872dacd336e3237d"}
Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.618320 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vbv26"]
Jan 05 22:25:19 crc kubenswrapper[4910]: E0105 22:25:19.619214 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="846e425a-c172-4c09-a839-f83b433f12ed" containerName="registry-server"
Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.619229 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="846e425a-c172-4c09-a839-f83b433f12ed" containerName="registry-server"
Jan 05 22:25:19 crc kubenswrapper[4910]: E0105 22:25:19.619255 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="846e425a-c172-4c09-a839-f83b433f12ed" containerName="extract-utilities"
Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.619264 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="846e425a-c172-4c09-a839-f83b433f12ed" containerName="extract-utilities"
Jan 05 22:25:19 crc kubenswrapper[4910]: E0105 22:25:19.619283 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="846e425a-c172-4c09-a839-f83b433f12ed" containerName="extract-content"
Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.619292 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="846e425a-c172-4c09-a839-f83b433f12ed" containerName="extract-content"
Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.619486 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="846e425a-c172-4c09-a839-f83b433f12ed" containerName="registry-server"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.638227 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vbv26"] Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.780694 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c090d8ea-955b-4d2d-9a11-50919e980ae2-catalog-content\") pod \"redhat-marketplace-vbv26\" (UID: \"c090d8ea-955b-4d2d-9a11-50919e980ae2\") " pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.780905 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nw2tn\" (UniqueName: \"kubernetes.io/projected/c090d8ea-955b-4d2d-9a11-50919e980ae2-kube-api-access-nw2tn\") pod \"redhat-marketplace-vbv26\" (UID: \"c090d8ea-955b-4d2d-9a11-50919e980ae2\") " pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.781490 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c090d8ea-955b-4d2d-9a11-50919e980ae2-utilities\") pod \"redhat-marketplace-vbv26\" (UID: \"c090d8ea-955b-4d2d-9a11-50919e980ae2\") " pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.789717 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fdrsr"] Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.791423 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.807791 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fdrsr"] Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.882763 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c090d8ea-955b-4d2d-9a11-50919e980ae2-utilities\") pod \"redhat-marketplace-vbv26\" (UID: \"c090d8ea-955b-4d2d-9a11-50919e980ae2\") " pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.882943 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c090d8ea-955b-4d2d-9a11-50919e980ae2-catalog-content\") pod \"redhat-marketplace-vbv26\" (UID: \"c090d8ea-955b-4d2d-9a11-50919e980ae2\") " pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.883525 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c090d8ea-955b-4d2d-9a11-50919e980ae2-catalog-content\") pod \"redhat-marketplace-vbv26\" (UID: \"c090d8ea-955b-4d2d-9a11-50919e980ae2\") " pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.883574 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nw2tn\" (UniqueName: \"kubernetes.io/projected/c090d8ea-955b-4d2d-9a11-50919e980ae2-kube-api-access-nw2tn\") pod \"redhat-marketplace-vbv26\" (UID: \"c090d8ea-955b-4d2d-9a11-50919e980ae2\") " 
pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.883732 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c090d8ea-955b-4d2d-9a11-50919e980ae2-utilities\") pod \"redhat-marketplace-vbv26\" (UID: \"c090d8ea-955b-4d2d-9a11-50919e980ae2\") " pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.904960 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nw2tn\" (UniqueName: \"kubernetes.io/projected/c090d8ea-955b-4d2d-9a11-50919e980ae2-kube-api-access-nw2tn\") pod \"redhat-marketplace-vbv26\" (UID: \"c090d8ea-955b-4d2d-9a11-50919e980ae2\") " pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.942299 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.986014 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-catalog-content\") pod \"redhat-operators-fdrsr\" (UID: \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\") " pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.986185 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hwrq\" (UniqueName: \"kubernetes.io/projected/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-kube-api-access-7hwrq\") pod \"redhat-operators-fdrsr\" (UID: \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\") " pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:19 crc kubenswrapper[4910]: I0105 22:25:19.986220 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-utilities\") pod \"redhat-operators-fdrsr\" (UID: \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\") " pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:20 crc kubenswrapper[4910]: I0105 22:25:20.087314 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hwrq\" (UniqueName: \"kubernetes.io/projected/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-kube-api-access-7hwrq\") pod \"redhat-operators-fdrsr\" (UID: \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\") " pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:20 crc kubenswrapper[4910]: I0105 22:25:20.087373 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-utilities\") pod \"redhat-operators-fdrsr\" (UID: \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\") " pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:20 crc kubenswrapper[4910]: I0105 22:25:20.087435 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-catalog-content\") pod \"redhat-operators-fdrsr\" (UID: \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\") " pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:20 crc kubenswrapper[4910]: I0105 22:25:20.087936 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-catalog-content\") pod \"redhat-operators-fdrsr\" (UID: \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\") " pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:20 crc kubenswrapper[4910]: I0105 22:25:20.088872 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-utilities\") pod \"redhat-operators-fdrsr\" (UID: \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\") " pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:20 crc kubenswrapper[4910]: I0105 22:25:20.112187 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hwrq\" (UniqueName: \"kubernetes.io/projected/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-kube-api-access-7hwrq\") pod \"redhat-operators-fdrsr\" (UID: \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\") " pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:20 crc kubenswrapper[4910]: I0105 22:25:20.398510 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vbv26"] Jan 05 22:25:20 crc kubenswrapper[4910]: I0105 22:25:20.410668 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:20 crc kubenswrapper[4910]: I0105 22:25:20.691937 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fdrsr"] Jan 05 22:25:20 crc kubenswrapper[4910]: W0105 22:25:20.699588 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44ba5e17_a72d_4a73_a1d3_f8f3e4a7a451.slice/crio-81b3bbb8c40b9536429ff894e5d07512e3cc72973753a8d92813b79e36558c70 WatchSource:0}: Error finding container 81b3bbb8c40b9536429ff894e5d07512e3cc72973753a8d92813b79e36558c70: Status 404 returned error can't find the container with id 81b3bbb8c40b9536429ff894e5d07512e3cc72973753a8d92813b79e36558c70 Jan 05 22:25:21 crc kubenswrapper[4910]: I0105 22:25:21.246694 4910 generic.go:334] "Generic (PLEG): container finished" podID="c090d8ea-955b-4d2d-9a11-50919e980ae2" containerID="0f069e6cfa6ec4b2012a1512e11a7770a2b3b40117c9ee8b8478ce7524104c96" exitCode=0 Jan 05 22:25:21 crc kubenswrapper[4910]: I0105 22:25:21.246759 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vbv26" event={"ID":"c090d8ea-955b-4d2d-9a11-50919e980ae2","Type":"ContainerDied","Data":"0f069e6cfa6ec4b2012a1512e11a7770a2b3b40117c9ee8b8478ce7524104c96"} Jan 05 22:25:21 crc kubenswrapper[4910]: I0105 22:25:21.246788 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vbv26" event={"ID":"c090d8ea-955b-4d2d-9a11-50919e980ae2","Type":"ContainerStarted","Data":"8d71ffb526f1969c3a1e704a208ce6b9d8d7a37fd110b2dbe03da46166ba93d2"} Jan 05 22:25:21 crc kubenswrapper[4910]: I0105 22:25:21.249251 4910 generic.go:334] "Generic (PLEG): container finished" podID="44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" containerID="0c6c8849f7fecdd7e4649ab13cbbaeab478a3e655ade97ee5377ef6ebff7bf53" exitCode=0 Jan 05 22:25:21 crc kubenswrapper[4910]: I0105 22:25:21.249275 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fdrsr" 
event={"ID":"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451","Type":"ContainerDied","Data":"0c6c8849f7fecdd7e4649ab13cbbaeab478a3e655ade97ee5377ef6ebff7bf53"} Jan 05 22:25:21 crc kubenswrapper[4910]: I0105 22:25:21.249292 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fdrsr" event={"ID":"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451","Type":"ContainerStarted","Data":"81b3bbb8c40b9536429ff894e5d07512e3cc72973753a8d92813b79e36558c70"} Jan 05 22:25:21 crc kubenswrapper[4910]: I0105 22:25:21.251279 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 22:25:22 crc kubenswrapper[4910]: I0105 22:25:22.266448 4910 generic.go:334] "Generic (PLEG): container finished" podID="c090d8ea-955b-4d2d-9a11-50919e980ae2" containerID="7562a62a6d12f7205f88699528ffff7520f2f7dc77eb185e9837745a4a909c5e" exitCode=0 Jan 05 22:25:22 crc kubenswrapper[4910]: I0105 22:25:22.266800 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vbv26" event={"ID":"c090d8ea-955b-4d2d-9a11-50919e980ae2","Type":"ContainerDied","Data":"7562a62a6d12f7205f88699528ffff7520f2f7dc77eb185e9837745a4a909c5e"} Jan 05 22:25:23 crc kubenswrapper[4910]: I0105 22:25:23.277436 4910 generic.go:334] "Generic (PLEG): container finished" podID="44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" containerID="59ca56a942490ec5f818aed2db6696f127761d6fd6bdfca54cb9acd513e1026f" exitCode=0 Jan 05 22:25:23 crc kubenswrapper[4910]: I0105 22:25:23.277507 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fdrsr" event={"ID":"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451","Type":"ContainerDied","Data":"59ca56a942490ec5f818aed2db6696f127761d6fd6bdfca54cb9acd513e1026f"} Jan 05 22:25:23 crc kubenswrapper[4910]: I0105 22:25:23.280807 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vbv26" event={"ID":"c090d8ea-955b-4d2d-9a11-50919e980ae2","Type":"ContainerStarted","Data":"9950e0f4b2dd57578ab81d920188610d10472cdecd3a4cc4df85477445ca7132"} Jan 05 22:25:23 crc kubenswrapper[4910]: I0105 22:25:23.317675 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vbv26" podStartSLOduration=2.824965281 podStartE2EDuration="4.317653414s" podCreationTimestamp="2026-01-05 22:25:19 +0000 UTC" firstStartedPulling="2026-01-05 22:25:21.251064031 +0000 UTC m=+2052.828561701" lastFinishedPulling="2026-01-05 22:25:22.743752164 +0000 UTC m=+2054.321249834" observedRunningTime="2026-01-05 22:25:23.315007077 +0000 UTC m=+2054.892504747" watchObservedRunningTime="2026-01-05 22:25:23.317653414 +0000 UTC m=+2054.895151074" Jan 05 22:25:24 crc kubenswrapper[4910]: I0105 22:25:24.307394 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fdrsr" event={"ID":"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451","Type":"ContainerStarted","Data":"b7bac031cf77b631f35e0424bbabc7239fa065843e4c5fc5b0784ef5dbb28d56"} Jan 05 22:25:24 crc kubenswrapper[4910]: I0105 22:25:24.333134 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fdrsr" podStartSLOduration=2.900985113 podStartE2EDuration="5.33309936s" podCreationTimestamp="2026-01-05 22:25:19 +0000 UTC" firstStartedPulling="2026-01-05 22:25:21.25105914 +0000 UTC m=+2052.828556810" lastFinishedPulling="2026-01-05 22:25:23.683173387 +0000 UTC m=+2055.260671057" 
observedRunningTime="2026-01-05 22:25:24.329112189 +0000 UTC m=+2055.906609859" watchObservedRunningTime="2026-01-05 22:25:24.33309936 +0000 UTC m=+2055.910597030" Jan 05 22:25:29 crc kubenswrapper[4910]: I0105 22:25:29.943360 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:29 crc kubenswrapper[4910]: I0105 22:25:29.944094 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:29 crc kubenswrapper[4910]: I0105 22:25:29.991019 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:30 crc kubenswrapper[4910]: I0105 22:25:30.411342 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:30 crc kubenswrapper[4910]: I0105 22:25:30.411657 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:30 crc kubenswrapper[4910]: I0105 22:25:30.417604 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:30 crc kubenswrapper[4910]: I0105 22:25:30.461234 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:30 crc kubenswrapper[4910]: I0105 22:25:30.475063 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vbv26"] Jan 05 22:25:31 crc kubenswrapper[4910]: I0105 22:25:31.396503 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:32 crc kubenswrapper[4910]: I0105 22:25:32.363931 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vbv26" podUID="c090d8ea-955b-4d2d-9a11-50919e980ae2" containerName="registry-server" containerID="cri-o://9950e0f4b2dd57578ab81d920188610d10472cdecd3a4cc4df85477445ca7132" gracePeriod=2 Jan 05 22:25:32 crc kubenswrapper[4910]: I0105 22:25:32.634046 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fdrsr"] Jan 05 22:25:34 crc kubenswrapper[4910]: I0105 22:25:34.379277 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fdrsr" podUID="44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" containerName="registry-server" containerID="cri-o://b7bac031cf77b631f35e0424bbabc7239fa065843e4c5fc5b0784ef5dbb28d56" gracePeriod=2 Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.391239 4910 generic.go:334] "Generic (PLEG): container finished" podID="c090d8ea-955b-4d2d-9a11-50919e980ae2" containerID="9950e0f4b2dd57578ab81d920188610d10472cdecd3a4cc4df85477445ca7132" exitCode=0 Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.391469 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vbv26" event={"ID":"c090d8ea-955b-4d2d-9a11-50919e980ae2","Type":"ContainerDied","Data":"9950e0f4b2dd57578ab81d920188610d10472cdecd3a4cc4df85477445ca7132"} Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.404955 4910 generic.go:334] "Generic (PLEG): container finished" podID="44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" 
containerID="b7bac031cf77b631f35e0424bbabc7239fa065843e4c5fc5b0784ef5dbb28d56" exitCode=0 Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.405000 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fdrsr" event={"ID":"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451","Type":"ContainerDied","Data":"b7bac031cf77b631f35e0424bbabc7239fa065843e4c5fc5b0784ef5dbb28d56"} Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.457820 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.623647 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c090d8ea-955b-4d2d-9a11-50919e980ae2-catalog-content\") pod \"c090d8ea-955b-4d2d-9a11-50919e980ae2\" (UID: \"c090d8ea-955b-4d2d-9a11-50919e980ae2\") " Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.623744 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nw2tn\" (UniqueName: \"kubernetes.io/projected/c090d8ea-955b-4d2d-9a11-50919e980ae2-kube-api-access-nw2tn\") pod \"c090d8ea-955b-4d2d-9a11-50919e980ae2\" (UID: \"c090d8ea-955b-4d2d-9a11-50919e980ae2\") " Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.623811 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c090d8ea-955b-4d2d-9a11-50919e980ae2-utilities\") pod \"c090d8ea-955b-4d2d-9a11-50919e980ae2\" (UID: \"c090d8ea-955b-4d2d-9a11-50919e980ae2\") " Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.625587 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c090d8ea-955b-4d2d-9a11-50919e980ae2-utilities" (OuterVolumeSpecName: "utilities") pod "c090d8ea-955b-4d2d-9a11-50919e980ae2" (UID: "c090d8ea-955b-4d2d-9a11-50919e980ae2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.632797 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c090d8ea-955b-4d2d-9a11-50919e980ae2-kube-api-access-nw2tn" (OuterVolumeSpecName: "kube-api-access-nw2tn") pod "c090d8ea-955b-4d2d-9a11-50919e980ae2" (UID: "c090d8ea-955b-4d2d-9a11-50919e980ae2"). InnerVolumeSpecName "kube-api-access-nw2tn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.654046 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c090d8ea-955b-4d2d-9a11-50919e980ae2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c090d8ea-955b-4d2d-9a11-50919e980ae2" (UID: "c090d8ea-955b-4d2d-9a11-50919e980ae2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.725436 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nw2tn\" (UniqueName: \"kubernetes.io/projected/c090d8ea-955b-4d2d-9a11-50919e980ae2-kube-api-access-nw2tn\") on node \"crc\" DevicePath \"\"" Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.725470 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c090d8ea-955b-4d2d-9a11-50919e980ae2-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.725482 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c090d8ea-955b-4d2d-9a11-50919e980ae2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.784651 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.928592 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-utilities\") pod \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\" (UID: \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\") " Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.928701 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-catalog-content\") pod \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\" (UID: \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\") " Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.928768 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hwrq\" (UniqueName: \"kubernetes.io/projected/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-kube-api-access-7hwrq\") pod \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\" (UID: \"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451\") " Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.930670 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-utilities" (OuterVolumeSpecName: "utilities") pod "44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" (UID: "44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:25:35 crc kubenswrapper[4910]: I0105 22:25:35.934098 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-kube-api-access-7hwrq" (OuterVolumeSpecName: "kube-api-access-7hwrq") pod "44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" (UID: "44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451"). InnerVolumeSpecName "kube-api-access-7hwrq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.032299 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hwrq\" (UniqueName: \"kubernetes.io/projected/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-kube-api-access-7hwrq\") on node \"crc\" DevicePath \"\"" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.032800 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.100457 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" (UID: "44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.133471 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.415168 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fdrsr" event={"ID":"44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451","Type":"ContainerDied","Data":"81b3bbb8c40b9536429ff894e5d07512e3cc72973753a8d92813b79e36558c70"} Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.415301 4910 scope.go:117] "RemoveContainer" containerID="b7bac031cf77b631f35e0424bbabc7239fa065843e4c5fc5b0784ef5dbb28d56" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.415460 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fdrsr" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.420773 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vbv26" event={"ID":"c090d8ea-955b-4d2d-9a11-50919e980ae2","Type":"ContainerDied","Data":"8d71ffb526f1969c3a1e704a208ce6b9d8d7a37fd110b2dbe03da46166ba93d2"} Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.420871 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vbv26" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.444465 4910 scope.go:117] "RemoveContainer" containerID="59ca56a942490ec5f818aed2db6696f127761d6fd6bdfca54cb9acd513e1026f" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.473826 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fdrsr"] Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.480648 4910 scope.go:117] "RemoveContainer" containerID="0c6c8849f7fecdd7e4649ab13cbbaeab478a3e655ade97ee5377ef6ebff7bf53" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.487703 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fdrsr"] Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.493900 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vbv26"] Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.498129 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vbv26"] Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.502275 4910 scope.go:117] "RemoveContainer" containerID="9950e0f4b2dd57578ab81d920188610d10472cdecd3a4cc4df85477445ca7132" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.526436 4910 scope.go:117] "RemoveContainer" containerID="7562a62a6d12f7205f88699528ffff7520f2f7dc77eb185e9837745a4a909c5e" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.544050 4910 scope.go:117] "RemoveContainer" containerID="0f069e6cfa6ec4b2012a1512e11a7770a2b3b40117c9ee8b8478ce7524104c96" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.731403 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" path="/var/lib/kubelet/pods/44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451/volumes" Jan 05 22:25:36 crc kubenswrapper[4910]: I0105 22:25:36.732110 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c090d8ea-955b-4d2d-9a11-50919e980ae2" path="/var/lib/kubelet/pods/c090d8ea-955b-4d2d-9a11-50919e980ae2/volumes" Jan 05 22:26:40 crc kubenswrapper[4910]: I0105 22:26:40.953099 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:26:40 crc kubenswrapper[4910]: I0105 22:26:40.954566 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:27:10 crc kubenswrapper[4910]: I0105 22:27:10.952841 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:27:10 crc kubenswrapper[4910]: I0105 22:27:10.953481 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:27:40 crc kubenswrapper[4910]: I0105 22:27:40.952477 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:27:40 crc kubenswrapper[4910]: I0105 22:27:40.953099 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:27:40 crc kubenswrapper[4910]: I0105 22:27:40.953179 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 22:27:40 crc kubenswrapper[4910]: I0105 22:27:40.953864 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d8056f50a258b26e2d83ce053ae1afe592e18b3df0dfc6cb872dacd336e3237d"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 22:27:40 crc kubenswrapper[4910]: I0105 22:27:40.953933 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://d8056f50a258b26e2d83ce053ae1afe592e18b3df0dfc6cb872dacd336e3237d" gracePeriod=600 Jan 05 22:27:41 crc kubenswrapper[4910]: I0105 22:27:41.445519 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="d8056f50a258b26e2d83ce053ae1afe592e18b3df0dfc6cb872dacd336e3237d" exitCode=0 Jan 05 22:27:41 crc kubenswrapper[4910]: I0105 22:27:41.445663 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"d8056f50a258b26e2d83ce053ae1afe592e18b3df0dfc6cb872dacd336e3237d"} Jan 05 22:27:41 crc kubenswrapper[4910]: I0105 22:27:41.445842 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9"} Jan 05 22:27:41 crc kubenswrapper[4910]: I0105 22:27:41.445862 4910 scope.go:117] "RemoveContainer" containerID="88fdda57c119e3e2037b5a97cf4fa9afe875ac076db0f345ac5091c08bf08c43" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.618993 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zcj8q"] Jan 05 22:28:33 crc kubenswrapper[4910]: E0105 22:28:33.619937 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" containerName="extract-content" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.619952 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" 
containerName="extract-content" Jan 05 22:28:33 crc kubenswrapper[4910]: E0105 22:28:33.619962 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" containerName="registry-server" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.619969 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" containerName="registry-server" Jan 05 22:28:33 crc kubenswrapper[4910]: E0105 22:28:33.619984 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c090d8ea-955b-4d2d-9a11-50919e980ae2" containerName="registry-server" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.619991 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c090d8ea-955b-4d2d-9a11-50919e980ae2" containerName="registry-server" Jan 05 22:28:33 crc kubenswrapper[4910]: E0105 22:28:33.620012 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" containerName="extract-utilities" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.620019 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" containerName="extract-utilities" Jan 05 22:28:33 crc kubenswrapper[4910]: E0105 22:28:33.620039 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c090d8ea-955b-4d2d-9a11-50919e980ae2" containerName="extract-content" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.620046 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c090d8ea-955b-4d2d-9a11-50919e980ae2" containerName="extract-content" Jan 05 22:28:33 crc kubenswrapper[4910]: E0105 22:28:33.620062 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c090d8ea-955b-4d2d-9a11-50919e980ae2" containerName="extract-utilities" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.620071 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c090d8ea-955b-4d2d-9a11-50919e980ae2" containerName="extract-utilities" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.620252 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="44ba5e17-a72d-4a73-a1d3-f8f3e4a7a451" containerName="registry-server" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.620276 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c090d8ea-955b-4d2d-9a11-50919e980ae2" containerName="registry-server" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.621561 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.636440 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zcj8q"] Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.785889 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb8bc305-6a32-450c-af92-e34429b84de8-utilities\") pod \"community-operators-zcj8q\" (UID: \"fb8bc305-6a32-450c-af92-e34429b84de8\") " pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.786024 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lsrx8\" (UniqueName: \"kubernetes.io/projected/fb8bc305-6a32-450c-af92-e34429b84de8-kube-api-access-lsrx8\") pod \"community-operators-zcj8q\" (UID: \"fb8bc305-6a32-450c-af92-e34429b84de8\") " pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.786113 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb8bc305-6a32-450c-af92-e34429b84de8-catalog-content\") pod \"community-operators-zcj8q\" (UID: \"fb8bc305-6a32-450c-af92-e34429b84de8\") " pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.888616 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lsrx8\" (UniqueName: \"kubernetes.io/projected/fb8bc305-6a32-450c-af92-e34429b84de8-kube-api-access-lsrx8\") pod \"community-operators-zcj8q\" (UID: \"fb8bc305-6a32-450c-af92-e34429b84de8\") " pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.888766 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb8bc305-6a32-450c-af92-e34429b84de8-catalog-content\") pod \"community-operators-zcj8q\" (UID: \"fb8bc305-6a32-450c-af92-e34429b84de8\") " pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.888847 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb8bc305-6a32-450c-af92-e34429b84de8-utilities\") pod \"community-operators-zcj8q\" (UID: \"fb8bc305-6a32-450c-af92-e34429b84de8\") " pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.889507 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb8bc305-6a32-450c-af92-e34429b84de8-utilities\") pod \"community-operators-zcj8q\" (UID: \"fb8bc305-6a32-450c-af92-e34429b84de8\") " pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.889507 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb8bc305-6a32-450c-af92-e34429b84de8-catalog-content\") pod \"community-operators-zcj8q\" (UID: \"fb8bc305-6a32-450c-af92-e34429b84de8\") " pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.915870 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lsrx8\" (UniqueName: \"kubernetes.io/projected/fb8bc305-6a32-450c-af92-e34429b84de8-kube-api-access-lsrx8\") pod \"community-operators-zcj8q\" (UID: \"fb8bc305-6a32-450c-af92-e34429b84de8\") " pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:33 crc kubenswrapper[4910]: I0105 22:28:33.957812 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:34 crc kubenswrapper[4910]: I0105 22:28:34.540378 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zcj8q"] Jan 05 22:28:34 crc kubenswrapper[4910]: W0105 22:28:34.549281 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb8bc305_6a32_450c_af92_e34429b84de8.slice/crio-666deb806cd3d1dbab250e5ebd8d1d79050a147bc98ab5dae548bdbc69af2a24 WatchSource:0}: Error finding container 666deb806cd3d1dbab250e5ebd8d1d79050a147bc98ab5dae548bdbc69af2a24: Status 404 returned error can't find the container with id 666deb806cd3d1dbab250e5ebd8d1d79050a147bc98ab5dae548bdbc69af2a24 Jan 05 22:28:34 crc kubenswrapper[4910]: I0105 22:28:34.855202 4910 generic.go:334] "Generic (PLEG): container finished" podID="fb8bc305-6a32-450c-af92-e34429b84de8" containerID="1f84901fc42df1d0cb31151b22b3c16dd703282ac108bef7457bbda4164fbfb2" exitCode=0 Jan 05 22:28:34 crc kubenswrapper[4910]: I0105 22:28:34.855256 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zcj8q" event={"ID":"fb8bc305-6a32-450c-af92-e34429b84de8","Type":"ContainerDied","Data":"1f84901fc42df1d0cb31151b22b3c16dd703282ac108bef7457bbda4164fbfb2"} Jan 05 22:28:34 crc kubenswrapper[4910]: I0105 22:28:34.855286 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zcj8q" event={"ID":"fb8bc305-6a32-450c-af92-e34429b84de8","Type":"ContainerStarted","Data":"666deb806cd3d1dbab250e5ebd8d1d79050a147bc98ab5dae548bdbc69af2a24"} Jan 05 22:28:35 crc kubenswrapper[4910]: I0105 22:28:35.864262 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zcj8q" event={"ID":"fb8bc305-6a32-450c-af92-e34429b84de8","Type":"ContainerStarted","Data":"4746ab95a15b6529da73e4f15e75067672d48425a4d1bcf42646ff5812f57f64"} Jan 05 22:28:36 crc kubenswrapper[4910]: I0105 22:28:36.870894 4910 generic.go:334] "Generic (PLEG): container finished" podID="fb8bc305-6a32-450c-af92-e34429b84de8" containerID="4746ab95a15b6529da73e4f15e75067672d48425a4d1bcf42646ff5812f57f64" exitCode=0 Jan 05 22:28:36 crc kubenswrapper[4910]: I0105 22:28:36.870943 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zcj8q" event={"ID":"fb8bc305-6a32-450c-af92-e34429b84de8","Type":"ContainerDied","Data":"4746ab95a15b6529da73e4f15e75067672d48425a4d1bcf42646ff5812f57f64"} Jan 05 22:28:37 crc kubenswrapper[4910]: I0105 22:28:37.881012 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zcj8q" event={"ID":"fb8bc305-6a32-450c-af92-e34429b84de8","Type":"ContainerStarted","Data":"c49cf82631d32c9ff8ef74f0f7ca48fd897edc806ed1403994d11cfd7b9e8c56"} Jan 05 22:28:37 crc kubenswrapper[4910]: I0105 22:28:37.908832 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zcj8q" 
podStartSLOduration=2.38938558 podStartE2EDuration="4.908806211s" podCreationTimestamp="2026-01-05 22:28:33 +0000 UTC" firstStartedPulling="2026-01-05 22:28:34.858173536 +0000 UTC m=+2246.435671196" lastFinishedPulling="2026-01-05 22:28:37.377594157 +0000 UTC m=+2248.955091827" observedRunningTime="2026-01-05 22:28:37.900629495 +0000 UTC m=+2249.478127165" watchObservedRunningTime="2026-01-05 22:28:37.908806211 +0000 UTC m=+2249.486303891" Jan 05 22:28:41 crc kubenswrapper[4910]: I0105 22:28:41.999088 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bb4pd"] Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.002281 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bb4pd" Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.008166 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bb4pd"] Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.124840 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6eea4e0-37cd-436e-b483-5777026faeee-utilities\") pod \"certified-operators-bb4pd\" (UID: \"a6eea4e0-37cd-436e-b483-5777026faeee\") " pod="openshift-marketplace/certified-operators-bb4pd" Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.124908 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qffzt\" (UniqueName: \"kubernetes.io/projected/a6eea4e0-37cd-436e-b483-5777026faeee-kube-api-access-qffzt\") pod \"certified-operators-bb4pd\" (UID: \"a6eea4e0-37cd-436e-b483-5777026faeee\") " pod="openshift-marketplace/certified-operators-bb4pd" Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.124948 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6eea4e0-37cd-436e-b483-5777026faeee-catalog-content\") pod \"certified-operators-bb4pd\" (UID: \"a6eea4e0-37cd-436e-b483-5777026faeee\") " pod="openshift-marketplace/certified-operators-bb4pd" Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.226745 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6eea4e0-37cd-436e-b483-5777026faeee-catalog-content\") pod \"certified-operators-bb4pd\" (UID: \"a6eea4e0-37cd-436e-b483-5777026faeee\") " pod="openshift-marketplace/certified-operators-bb4pd" Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.226891 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6eea4e0-37cd-436e-b483-5777026faeee-utilities\") pod \"certified-operators-bb4pd\" (UID: \"a6eea4e0-37cd-436e-b483-5777026faeee\") " pod="openshift-marketplace/certified-operators-bb4pd" Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.226923 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qffzt\" (UniqueName: \"kubernetes.io/projected/a6eea4e0-37cd-436e-b483-5777026faeee-kube-api-access-qffzt\") pod \"certified-operators-bb4pd\" (UID: \"a6eea4e0-37cd-436e-b483-5777026faeee\") " pod="openshift-marketplace/certified-operators-bb4pd" Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.227386 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
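Timestamps such as firstStartedPulling above end in an m=+2246.435671196 suffix: that is Go's monotonic clock reading, which time.Now() attaches alongside the wall-clock value and which Time.String() prints as seconds on the process's monotonic clock. This is standard-library behavior, nothing kubelet-specific, and easy to verify:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now() // carries a monotonic reading
	time.Sleep(50 * time.Millisecond)

	fmt.Println(start)             // printed form ends in "m=+<monotonic seconds>"
	fmt.Println(time.Since(start)) // uses the monotonic reading; immune to wall-clock jumps
	fmt.Println(start.Round(0))    // Round(0) strips the reading; no m=+ suffix
}
```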
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6eea4e0-37cd-436e-b483-5777026faeee-catalog-content\") pod \"certified-operators-bb4pd\" (UID: \"a6eea4e0-37cd-436e-b483-5777026faeee\") " pod="openshift-marketplace/certified-operators-bb4pd" Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.227481 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6eea4e0-37cd-436e-b483-5777026faeee-utilities\") pod \"certified-operators-bb4pd\" (UID: \"a6eea4e0-37cd-436e-b483-5777026faeee\") " pod="openshift-marketplace/certified-operators-bb4pd" Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.251655 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qffzt\" (UniqueName: \"kubernetes.io/projected/a6eea4e0-37cd-436e-b483-5777026faeee-kube-api-access-qffzt\") pod \"certified-operators-bb4pd\" (UID: \"a6eea4e0-37cd-436e-b483-5777026faeee\") " pod="openshift-marketplace/certified-operators-bb4pd" Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.324719 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bb4pd" Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.792356 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bb4pd"] Jan 05 22:28:42 crc kubenswrapper[4910]: I0105 22:28:42.914614 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bb4pd" event={"ID":"a6eea4e0-37cd-436e-b483-5777026faeee","Type":"ContainerStarted","Data":"72672b12d425c982cf7b8e28f9f890bedd95b4131f0d446c0c43f650ce34d6a0"} Jan 05 22:28:43 crc kubenswrapper[4910]: I0105 22:28:43.922405 4910 generic.go:334] "Generic (PLEG): container finished" podID="a6eea4e0-37cd-436e-b483-5777026faeee" containerID="be65181bb4279004ee78d0f86ad40cb3154aa0f28c81f76635449c2c9e8a54ef" exitCode=0 Jan 05 22:28:43 crc kubenswrapper[4910]: I0105 22:28:43.922453 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bb4pd" event={"ID":"a6eea4e0-37cd-436e-b483-5777026faeee","Type":"ContainerDied","Data":"be65181bb4279004ee78d0f86ad40cb3154aa0f28c81f76635449c2c9e8a54ef"} Jan 05 22:28:43 crc kubenswrapper[4910]: I0105 22:28:43.957975 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:43 crc kubenswrapper[4910]: I0105 22:28:43.958057 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:44 crc kubenswrapper[4910]: I0105 22:28:44.006692 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:44 crc kubenswrapper[4910]: I0105 22:28:44.934599 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bb4pd" event={"ID":"a6eea4e0-37cd-436e-b483-5777026faeee","Type":"ContainerStarted","Data":"986e19984538541fca7c727ad03ed5a21e6475af23aa0d1d9fd2062fb5884f83"} Jan 05 22:28:44 crc kubenswrapper[4910]: I0105 22:28:44.990788 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:45 crc kubenswrapper[4910]: I0105 22:28:45.942814 4910 generic.go:334] "Generic (PLEG): container finished" podID="a6eea4e0-37cd-436e-b483-5777026faeee" 
containerID="986e19984538541fca7c727ad03ed5a21e6475af23aa0d1d9fd2062fb5884f83" exitCode=0 Jan 05 22:28:45 crc kubenswrapper[4910]: I0105 22:28:45.942913 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bb4pd" event={"ID":"a6eea4e0-37cd-436e-b483-5777026faeee","Type":"ContainerDied","Data":"986e19984538541fca7c727ad03ed5a21e6475af23aa0d1d9fd2062fb5884f83"} Jan 05 22:28:46 crc kubenswrapper[4910]: I0105 22:28:46.952793 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bb4pd" event={"ID":"a6eea4e0-37cd-436e-b483-5777026faeee","Type":"ContainerStarted","Data":"c382f95320fe40425f7b531291bc54e6f80b85f889b3c828815167bde5db29bd"} Jan 05 22:28:46 crc kubenswrapper[4910]: I0105 22:28:46.975940 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bb4pd" podStartSLOduration=3.313488256 podStartE2EDuration="5.975906581s" podCreationTimestamp="2026-01-05 22:28:41 +0000 UTC" firstStartedPulling="2026-01-05 22:28:43.923986175 +0000 UTC m=+2255.501483855" lastFinishedPulling="2026-01-05 22:28:46.58640451 +0000 UTC m=+2258.163902180" observedRunningTime="2026-01-05 22:28:46.967378107 +0000 UTC m=+2258.544875807" watchObservedRunningTime="2026-01-05 22:28:46.975906581 +0000 UTC m=+2258.553404281" Jan 05 22:28:46 crc kubenswrapper[4910]: I0105 22:28:46.994231 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zcj8q"] Jan 05 22:28:46 crc kubenswrapper[4910]: I0105 22:28:46.994469 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zcj8q" podUID="fb8bc305-6a32-450c-af92-e34429b84de8" containerName="registry-server" containerID="cri-o://c49cf82631d32c9ff8ef74f0f7ca48fd897edc806ed1403994d11cfd7b9e8c56" gracePeriod=2 Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.387036 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.506829 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb8bc305-6a32-450c-af92-e34429b84de8-utilities\") pod \"fb8bc305-6a32-450c-af92-e34429b84de8\" (UID: \"fb8bc305-6a32-450c-af92-e34429b84de8\") " Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.506930 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb8bc305-6a32-450c-af92-e34429b84de8-catalog-content\") pod \"fb8bc305-6a32-450c-af92-e34429b84de8\" (UID: \"fb8bc305-6a32-450c-af92-e34429b84de8\") " Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.507032 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lsrx8\" (UniqueName: \"kubernetes.io/projected/fb8bc305-6a32-450c-af92-e34429b84de8-kube-api-access-lsrx8\") pod \"fb8bc305-6a32-450c-af92-e34429b84de8\" (UID: \"fb8bc305-6a32-450c-af92-e34429b84de8\") " Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.507670 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb8bc305-6a32-450c-af92-e34429b84de8-utilities" (OuterVolumeSpecName: "utilities") pod "fb8bc305-6a32-450c-af92-e34429b84de8" (UID: "fb8bc305-6a32-450c-af92-e34429b84de8"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.523295 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb8bc305-6a32-450c-af92-e34429b84de8-kube-api-access-lsrx8" (OuterVolumeSpecName: "kube-api-access-lsrx8") pod "fb8bc305-6a32-450c-af92-e34429b84de8" (UID: "fb8bc305-6a32-450c-af92-e34429b84de8"). InnerVolumeSpecName "kube-api-access-lsrx8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.562755 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb8bc305-6a32-450c-af92-e34429b84de8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fb8bc305-6a32-450c-af92-e34429b84de8" (UID: "fb8bc305-6a32-450c-af92-e34429b84de8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.608202 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb8bc305-6a32-450c-af92-e34429b84de8-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.608235 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb8bc305-6a32-450c-af92-e34429b84de8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.608247 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lsrx8\" (UniqueName: \"kubernetes.io/projected/fb8bc305-6a32-450c-af92-e34429b84de8-kube-api-access-lsrx8\") on node \"crc\" DevicePath \"\"" Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.961505 4910 generic.go:334] "Generic (PLEG): container finished" podID="fb8bc305-6a32-450c-af92-e34429b84de8" containerID="c49cf82631d32c9ff8ef74f0f7ca48fd897edc806ed1403994d11cfd7b9e8c56" exitCode=0 Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.962313 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zcj8q" Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.962314 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zcj8q" event={"ID":"fb8bc305-6a32-450c-af92-e34429b84de8","Type":"ContainerDied","Data":"c49cf82631d32c9ff8ef74f0f7ca48fd897edc806ed1403994d11cfd7b9e8c56"} Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.962712 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zcj8q" event={"ID":"fb8bc305-6a32-450c-af92-e34429b84de8","Type":"ContainerDied","Data":"666deb806cd3d1dbab250e5ebd8d1d79050a147bc98ab5dae548bdbc69af2a24"} Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.962736 4910 scope.go:117] "RemoveContainer" containerID="c49cf82631d32c9ff8ef74f0f7ca48fd897edc806ed1403994d11cfd7b9e8c56" Jan 05 22:28:47 crc kubenswrapper[4910]: I0105 22:28:47.985655 4910 scope.go:117] "RemoveContainer" containerID="4746ab95a15b6529da73e4f15e75067672d48425a4d1bcf42646ff5812f57f64" Jan 05 22:28:48 crc kubenswrapper[4910]: I0105 22:28:48.002616 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zcj8q"] Jan 05 22:28:48 crc kubenswrapper[4910]: I0105 22:28:48.011922 4910 scope.go:117] "RemoveContainer" containerID="1f84901fc42df1d0cb31151b22b3c16dd703282ac108bef7457bbda4164fbfb2" Jan 05 22:28:48 crc kubenswrapper[4910]: I0105 22:28:48.012278 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zcj8q"] Jan 05 22:28:48 crc kubenswrapper[4910]: I0105 22:28:48.033568 4910 scope.go:117] "RemoveContainer" containerID="c49cf82631d32c9ff8ef74f0f7ca48fd897edc806ed1403994d11cfd7b9e8c56" Jan 05 22:28:48 crc kubenswrapper[4910]: E0105 22:28:48.034236 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c49cf82631d32c9ff8ef74f0f7ca48fd897edc806ed1403994d11cfd7b9e8c56\": container with ID starting with c49cf82631d32c9ff8ef74f0f7ca48fd897edc806ed1403994d11cfd7b9e8c56 not found: ID does not exist" containerID="c49cf82631d32c9ff8ef74f0f7ca48fd897edc806ed1403994d11cfd7b9e8c56" Jan 05 22:28:48 crc kubenswrapper[4910]: I0105 22:28:48.034268 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c49cf82631d32c9ff8ef74f0f7ca48fd897edc806ed1403994d11cfd7b9e8c56"} err="failed to get container status \"c49cf82631d32c9ff8ef74f0f7ca48fd897edc806ed1403994d11cfd7b9e8c56\": rpc error: code = NotFound desc = could not find container \"c49cf82631d32c9ff8ef74f0f7ca48fd897edc806ed1403994d11cfd7b9e8c56\": container with ID starting with c49cf82631d32c9ff8ef74f0f7ca48fd897edc806ed1403994d11cfd7b9e8c56 not found: ID does not exist" Jan 05 22:28:48 crc kubenswrapper[4910]: I0105 22:28:48.034295 4910 scope.go:117] "RemoveContainer" containerID="4746ab95a15b6529da73e4f15e75067672d48425a4d1bcf42646ff5812f57f64" Jan 05 22:28:48 crc kubenswrapper[4910]: E0105 22:28:48.034665 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4746ab95a15b6529da73e4f15e75067672d48425a4d1bcf42646ff5812f57f64\": container with ID starting with 4746ab95a15b6529da73e4f15e75067672d48425a4d1bcf42646ff5812f57f64 not found: ID does not exist" containerID="4746ab95a15b6529da73e4f15e75067672d48425a4d1bcf42646ff5812f57f64" Jan 05 22:28:48 crc kubenswrapper[4910]: I0105 22:28:48.034712 4910 
Jan 05 22:28:48 crc kubenswrapper[4910]: I0105 22:28:48.034744 4910 scope.go:117] "RemoveContainer" containerID="1f84901fc42df1d0cb31151b22b3c16dd703282ac108bef7457bbda4164fbfb2"
Jan 05 22:28:48 crc kubenswrapper[4910]: E0105 22:28:48.035072 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f84901fc42df1d0cb31151b22b3c16dd703282ac108bef7457bbda4164fbfb2\": container with ID starting with 1f84901fc42df1d0cb31151b22b3c16dd703282ac108bef7457bbda4164fbfb2 not found: ID does not exist" containerID="1f84901fc42df1d0cb31151b22b3c16dd703282ac108bef7457bbda4164fbfb2"
Jan 05 22:28:48 crc kubenswrapper[4910]: I0105 22:28:48.035161 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f84901fc42df1d0cb31151b22b3c16dd703282ac108bef7457bbda4164fbfb2"} err="failed to get container status \"1f84901fc42df1d0cb31151b22b3c16dd703282ac108bef7457bbda4164fbfb2\": rpc error: code = NotFound desc = could not find container \"1f84901fc42df1d0cb31151b22b3c16dd703282ac108bef7457bbda4164fbfb2\": container with ID starting with 1f84901fc42df1d0cb31151b22b3c16dd703282ac108bef7457bbda4164fbfb2 not found: ID does not exist"
Jan 05 22:28:48 crc kubenswrapper[4910]: I0105 22:28:48.752454 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb8bc305-6a32-450c-af92-e34429b84de8" path="/var/lib/kubelet/pods/fb8bc305-6a32-450c-af92-e34429b84de8/volumes"
Jan 05 22:28:52 crc kubenswrapper[4910]: I0105 22:28:52.327495 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bb4pd"
Jan 05 22:28:52 crc kubenswrapper[4910]: I0105 22:28:52.329464 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bb4pd"
Jan 05 22:28:52 crc kubenswrapper[4910]: I0105 22:28:52.416095 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bb4pd"
Jan 05 22:28:53 crc kubenswrapper[4910]: I0105 22:28:53.066236 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bb4pd"
Jan 05 22:28:53 crc kubenswrapper[4910]: I0105 22:28:53.129276 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bb4pd"]
Jan 05 22:28:55 crc kubenswrapper[4910]: I0105 22:28:55.016082 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bb4pd" podUID="a6eea4e0-37cd-436e-b483-5777026faeee" containerName="registry-server" containerID="cri-o://c382f95320fe40425f7b531291bc54e6f80b85f889b3c828815167bde5db29bd" gracePeriod=2
Jan 05 22:28:56 crc kubenswrapper[4910]: I0105 22:28:56.026072 4910 generic.go:334] "Generic (PLEG): container finished" podID="a6eea4e0-37cd-436e-b483-5777026faeee" containerID="c382f95320fe40425f7b531291bc54e6f80b85f889b3c828815167bde5db29bd" exitCode=0
Jan 05 22:28:56 crc kubenswrapper[4910]: I0105 22:28:56.026092 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bb4pd" event={"ID":"a6eea4e0-37cd-436e-b483-5777026faeee","Type":"ContainerDied","Data":"c382f95320fe40425f7b531291bc54e6f80b85f889b3c828815167bde5db29bd"}
Jan 05 22:28:56 crc kubenswrapper[4910]: I0105 22:28:56.541904 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bb4pd"
Jan 05 22:28:56 crc kubenswrapper[4910]: I0105 22:28:56.644604 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6eea4e0-37cd-436e-b483-5777026faeee-utilities\") pod \"a6eea4e0-37cd-436e-b483-5777026faeee\" (UID: \"a6eea4e0-37cd-436e-b483-5777026faeee\") "
Jan 05 22:28:56 crc kubenswrapper[4910]: I0105 22:28:56.644653 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qffzt\" (UniqueName: \"kubernetes.io/projected/a6eea4e0-37cd-436e-b483-5777026faeee-kube-api-access-qffzt\") pod \"a6eea4e0-37cd-436e-b483-5777026faeee\" (UID: \"a6eea4e0-37cd-436e-b483-5777026faeee\") "
Jan 05 22:28:56 crc kubenswrapper[4910]: I0105 22:28:56.644689 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6eea4e0-37cd-436e-b483-5777026faeee-catalog-content\") pod \"a6eea4e0-37cd-436e-b483-5777026faeee\" (UID: \"a6eea4e0-37cd-436e-b483-5777026faeee\") "
Jan 05 22:28:56 crc kubenswrapper[4910]: I0105 22:28:56.645656 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6eea4e0-37cd-436e-b483-5777026faeee-utilities" (OuterVolumeSpecName: "utilities") pod "a6eea4e0-37cd-436e-b483-5777026faeee" (UID: "a6eea4e0-37cd-436e-b483-5777026faeee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:28:56 crc kubenswrapper[4910]: I0105 22:28:56.652917 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6eea4e0-37cd-436e-b483-5777026faeee-kube-api-access-qffzt" (OuterVolumeSpecName: "kube-api-access-qffzt") pod "a6eea4e0-37cd-436e-b483-5777026faeee" (UID: "a6eea4e0-37cd-436e-b483-5777026faeee"). InnerVolumeSpecName "kube-api-access-qffzt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:28:56 crc kubenswrapper[4910]: I0105 22:28:56.696006 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6eea4e0-37cd-436e-b483-5777026faeee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a6eea4e0-37cd-436e-b483-5777026faeee" (UID: "a6eea4e0-37cd-436e-b483-5777026faeee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:28:56 crc kubenswrapper[4910]: I0105 22:28:56.746931 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6eea4e0-37cd-436e-b483-5777026faeee-utilities\") on node \"crc\" DevicePath \"\""
Jan 05 22:28:56 crc kubenswrapper[4910]: I0105 22:28:56.746972 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qffzt\" (UniqueName: \"kubernetes.io/projected/a6eea4e0-37cd-436e-b483-5777026faeee-kube-api-access-qffzt\") on node \"crc\" DevicePath \"\""
Jan 05 22:28:56 crc kubenswrapper[4910]: I0105 22:28:56.746984 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6eea4e0-37cd-436e-b483-5777026faeee-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 05 22:28:57 crc kubenswrapper[4910]: I0105 22:28:57.036948 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bb4pd" event={"ID":"a6eea4e0-37cd-436e-b483-5777026faeee","Type":"ContainerDied","Data":"72672b12d425c982cf7b8e28f9f890bedd95b4131f0d446c0c43f650ce34d6a0"}
Jan 05 22:28:57 crc kubenswrapper[4910]: I0105 22:28:57.037023 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bb4pd"
Jan 05 22:28:57 crc kubenswrapper[4910]: I0105 22:28:57.037376 4910 scope.go:117] "RemoveContainer" containerID="c382f95320fe40425f7b531291bc54e6f80b85f889b3c828815167bde5db29bd"
Jan 05 22:28:57 crc kubenswrapper[4910]: I0105 22:28:57.057870 4910 scope.go:117] "RemoveContainer" containerID="986e19984538541fca7c727ad03ed5a21e6475af23aa0d1d9fd2062fb5884f83"
Jan 05 22:28:57 crc kubenswrapper[4910]: I0105 22:28:57.066054 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bb4pd"]
Jan 05 22:28:57 crc kubenswrapper[4910]: I0105 22:28:57.071551 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bb4pd"]
Jan 05 22:28:57 crc kubenswrapper[4910]: I0105 22:28:57.078434 4910 scope.go:117] "RemoveContainer" containerID="be65181bb4279004ee78d0f86ad40cb3154aa0f28c81f76635449c2c9e8a54ef"
Jan 05 22:28:58 crc kubenswrapper[4910]: I0105 22:28:58.737228 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6eea4e0-37cd-436e-b483-5777026faeee" path="/var/lib/kubelet/pods/a6eea4e0-37cd-436e-b483-5777026faeee/volumes"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.158231 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"]
Jan 05 22:30:00 crc kubenswrapper[4910]: E0105 22:30:00.159108 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb8bc305-6a32-450c-af92-e34429b84de8" containerName="registry-server"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.159141 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb8bc305-6a32-450c-af92-e34429b84de8" containerName="registry-server"
Jan 05 22:30:00 crc kubenswrapper[4910]: E0105 22:30:00.159157 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb8bc305-6a32-450c-af92-e34429b84de8" containerName="extract-utilities"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.159167 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb8bc305-6a32-450c-af92-e34429b84de8" containerName="extract-utilities"
Jan 05 22:30:00 crc kubenswrapper[4910]: E0105 22:30:00.159185 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6eea4e0-37cd-436e-b483-5777026faeee" containerName="extract-content"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.159194 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6eea4e0-37cd-436e-b483-5777026faeee" containerName="extract-content"
Jan 05 22:30:00 crc kubenswrapper[4910]: E0105 22:30:00.159212 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6eea4e0-37cd-436e-b483-5777026faeee" containerName="registry-server"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.159218 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6eea4e0-37cd-436e-b483-5777026faeee" containerName="registry-server"
Jan 05 22:30:00 crc kubenswrapper[4910]: E0105 22:30:00.159225 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6eea4e0-37cd-436e-b483-5777026faeee" containerName="extract-utilities"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.159231 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6eea4e0-37cd-436e-b483-5777026faeee" containerName="extract-utilities"
Jan 05 22:30:00 crc kubenswrapper[4910]: E0105 22:30:00.159246 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb8bc305-6a32-450c-af92-e34429b84de8" containerName="extract-content"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.159251 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb8bc305-6a32-450c-af92-e34429b84de8" containerName="extract-content"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.159379 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6eea4e0-37cd-436e-b483-5777026faeee" containerName="registry-server"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.159393 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb8bc305-6a32-450c-af92-e34429b84de8" containerName="registry-server"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.159895 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.171160 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.171588 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.172637 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"]
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.258979 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xxr5\" (UniqueName: \"kubernetes.io/projected/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-kube-api-access-7xxr5\") pod \"collect-profiles-29460870-jhg2c\" (UID: \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.259043 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-config-volume\") pod \"collect-profiles-29460870-jhg2c\" (UID: \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.259190 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-secret-volume\") pod \"collect-profiles-29460870-jhg2c\" (UID: \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.360563 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xxr5\" (UniqueName: \"kubernetes.io/projected/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-kube-api-access-7xxr5\") pod \"collect-profiles-29460870-jhg2c\" (UID: \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.360627 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-config-volume\") pod \"collect-profiles-29460870-jhg2c\" (UID: \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.360688 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-secret-volume\") pod \"collect-profiles-29460870-jhg2c\" (UID: \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.362105 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-config-volume\") pod \"collect-profiles-29460870-jhg2c\" (UID: \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.368915 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-secret-volume\") pod \"collect-profiles-29460870-jhg2c\" (UID: \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.379062 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xxr5\" (UniqueName: \"kubernetes.io/projected/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-kube-api-access-7xxr5\") pod \"collect-profiles-29460870-jhg2c\" (UID: \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.478477 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"
Jan 05 22:30:00 crc kubenswrapper[4910]: I0105 22:30:00.926213 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"]
Jan 05 22:30:01 crc kubenswrapper[4910]: I0105 22:30:01.569761 4910 generic.go:334] "Generic (PLEG): container finished" podID="46d6f885-2342-45f3-b84e-6ebd88cf4b2d" containerID="18d9a969858bfdb362a7efd7c806d89cfdc3200eff1915b88c9c54709ecf940c" exitCode=0
Jan 05 22:30:01 crc kubenswrapper[4910]: I0105 22:30:01.569810 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c" event={"ID":"46d6f885-2342-45f3-b84e-6ebd88cf4b2d","Type":"ContainerDied","Data":"18d9a969858bfdb362a7efd7c806d89cfdc3200eff1915b88c9c54709ecf940c"}
Jan 05 22:30:01 crc kubenswrapper[4910]: I0105 22:30:01.569860 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c" event={"ID":"46d6f885-2342-45f3-b84e-6ebd88cf4b2d","Type":"ContainerStarted","Data":"6814710ea90805c5e4f88343cb09f1f3019098d63b763cc27138a014805796fd"}
Jan 05 22:30:02 crc kubenswrapper[4910]: I0105 22:30:02.872702 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"
Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.001615 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-config-volume\") pod \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\" (UID: \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\") "
Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.001727 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xxr5\" (UniqueName: \"kubernetes.io/projected/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-kube-api-access-7xxr5\") pod \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\" (UID: \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\") "
Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.001787 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-secret-volume\") pod \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\" (UID: \"46d6f885-2342-45f3-b84e-6ebd88cf4b2d\") "
Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.002379 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-config-volume" (OuterVolumeSpecName: "config-volume") pod "46d6f885-2342-45f3-b84e-6ebd88cf4b2d" (UID: "46d6f885-2342-45f3-b84e-6ebd88cf4b2d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.007206 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-kube-api-access-7xxr5" (OuterVolumeSpecName: "kube-api-access-7xxr5") pod "46d6f885-2342-45f3-b84e-6ebd88cf4b2d" (UID: "46d6f885-2342-45f3-b84e-6ebd88cf4b2d"). InnerVolumeSpecName "kube-api-access-7xxr5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.008005 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "46d6f885-2342-45f3-b84e-6ebd88cf4b2d" (UID: "46d6f885-2342-45f3-b84e-6ebd88cf4b2d"). InnerVolumeSpecName "secret-volume".
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.104269 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-config-volume\") on node \"crc\" DevicePath \"\"" Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.104311 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7xxr5\" (UniqueName: \"kubernetes.io/projected/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-kube-api-access-7xxr5\") on node \"crc\" DevicePath \"\"" Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.104325 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/46d6f885-2342-45f3-b84e-6ebd88cf4b2d-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.585082 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c" event={"ID":"46d6f885-2342-45f3-b84e-6ebd88cf4b2d","Type":"ContainerDied","Data":"6814710ea90805c5e4f88343cb09f1f3019098d63b763cc27138a014805796fd"} Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.585149 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6814710ea90805c5e4f88343cb09f1f3019098d63b763cc27138a014805796fd" Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.585221 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c" Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.952848 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn"] Jan 05 22:30:03 crc kubenswrapper[4910]: I0105 22:30:03.963706 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460825-hv5kn"] Jan 05 22:30:04 crc kubenswrapper[4910]: I0105 22:30:04.736021 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee40a1b4-967e-40aa-b6c0-eaf211346941" path="/var/lib/kubelet/pods/ee40a1b4-967e-40aa-b6c0-eaf211346941/volumes" Jan 05 22:30:10 crc kubenswrapper[4910]: I0105 22:30:10.952118 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:30:10 crc kubenswrapper[4910]: I0105 22:30:10.952810 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:30:40 crc kubenswrapper[4910]: I0105 22:30:40.953171 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:30:40 crc kubenswrapper[4910]: I0105 22:30:40.954382 4910 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:30:42 crc kubenswrapper[4910]: I0105 22:30:42.937625 4910 scope.go:117] "RemoveContainer" containerID="a057eee485ddc3526ec9ba8e9e55caed65d80e741204ef899ff001441b02e82f" Jan 05 22:31:10 crc kubenswrapper[4910]: I0105 22:31:10.952630 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:31:10 crc kubenswrapper[4910]: I0105 22:31:10.953431 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:31:10 crc kubenswrapper[4910]: I0105 22:31:10.953500 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 22:31:10 crc kubenswrapper[4910]: I0105 22:31:10.954482 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 22:31:10 crc kubenswrapper[4910]: I0105 22:31:10.954607 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" gracePeriod=600 Jan 05 22:31:11 crc kubenswrapper[4910]: E0105 22:31:11.079574 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:31:11 crc kubenswrapper[4910]: I0105 22:31:11.107809 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" exitCode=0 Jan 05 22:31:11 crc kubenswrapper[4910]: I0105 22:31:11.107888 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9"} Jan 05 22:31:11 crc kubenswrapper[4910]: I0105 22:31:11.107960 4910 scope.go:117] "RemoveContainer" containerID="d8056f50a258b26e2d83ce053ae1afe592e18b3df0dfc6cb872dacd336e3237d" Jan 05 22:31:11 crc kubenswrapper[4910]: I0105 
22:31:11.108625 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:31:11 crc kubenswrapper[4910]: E0105 22:31:11.108963 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:31:22 crc kubenswrapper[4910]: I0105 22:31:22.721704 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:31:22 crc kubenswrapper[4910]: E0105 22:31:22.722233 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:31:33 crc kubenswrapper[4910]: I0105 22:31:33.722015 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:31:33 crc kubenswrapper[4910]: E0105 22:31:33.723152 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:31:46 crc kubenswrapper[4910]: I0105 22:31:46.722083 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:31:46 crc kubenswrapper[4910]: E0105 22:31:46.723025 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:32:01 crc kubenswrapper[4910]: I0105 22:32:01.721982 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:32:01 crc kubenswrapper[4910]: E0105 22:32:01.722793 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:32:13 crc kubenswrapper[4910]: I0105 22:32:13.722078 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:32:13 crc kubenswrapper[4910]: E0105 22:32:13.722948 
4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:32:27 crc kubenswrapper[4910]: I0105 22:32:27.721411 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:32:27 crc kubenswrapper[4910]: E0105 22:32:27.723663 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:32:38 crc kubenswrapper[4910]: I0105 22:32:38.726314 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:32:38 crc kubenswrapper[4910]: E0105 22:32:38.727155 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:32:51 crc kubenswrapper[4910]: I0105 22:32:51.722443 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:32:51 crc kubenswrapper[4910]: E0105 22:32:51.723254 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:33:03 crc kubenswrapper[4910]: I0105 22:33:03.722596 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:33:03 crc kubenswrapper[4910]: E0105 22:33:03.723807 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:33:14 crc kubenswrapper[4910]: I0105 22:33:14.721340 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:33:14 crc kubenswrapper[4910]: E0105 22:33:14.722177 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:33:27 crc kubenswrapper[4910]: I0105 22:33:27.721754 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:33:27 crc kubenswrapper[4910]: E0105 22:33:27.722540 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:33:41 crc kubenswrapper[4910]: I0105 22:33:41.721729 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:33:41 crc kubenswrapper[4910]: E0105 22:33:41.722511 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:33:52 crc kubenswrapper[4910]: I0105 22:33:52.721011 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:33:52 crc kubenswrapper[4910]: E0105 22:33:52.721809 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:34:06 crc kubenswrapper[4910]: I0105 22:34:06.721639 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:34:06 crc kubenswrapper[4910]: E0105 22:34:06.722903 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:34:19 crc kubenswrapper[4910]: I0105 22:34:19.721655 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:34:19 crc kubenswrapper[4910]: E0105 22:34:19.722411 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:34:34 crc kubenswrapper[4910]: I0105 22:34:34.722597 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:34:34 crc kubenswrapper[4910]: E0105 22:34:34.723829 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:34:46 crc kubenswrapper[4910]: I0105 22:34:46.721589 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:34:46 crc kubenswrapper[4910]: E0105 22:34:46.722580 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:35:00 crc kubenswrapper[4910]: I0105 22:35:00.722533 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:35:00 crc kubenswrapper[4910]: E0105 22:35:00.723343 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:35:14 crc kubenswrapper[4910]: I0105 22:35:14.721562 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:35:14 crc kubenswrapper[4910]: E0105 22:35:14.722610 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:35:25 crc kubenswrapper[4910]: I0105 22:35:25.721831 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:35:25 crc kubenswrapper[4910]: E0105 22:35:25.723786 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:35:40 crc kubenswrapper[4910]: I0105 22:35:40.721533 4910 
Jan 05 22:35:40 crc kubenswrapper[4910]: E0105 22:35:40.722407 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:35:50 crc kubenswrapper[4910]: I0105 22:35:50.919462 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8ccds"]
Jan 05 22:35:50 crc kubenswrapper[4910]: E0105 22:35:50.921832 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46d6f885-2342-45f3-b84e-6ebd88cf4b2d" containerName="collect-profiles"
Jan 05 22:35:50 crc kubenswrapper[4910]: I0105 22:35:50.921879 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="46d6f885-2342-45f3-b84e-6ebd88cf4b2d" containerName="collect-profiles"
Jan 05 22:35:50 crc kubenswrapper[4910]: I0105 22:35:50.922064 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="46d6f885-2342-45f3-b84e-6ebd88cf4b2d" containerName="collect-profiles"
Jan 05 22:35:50 crc kubenswrapper[4910]: I0105 22:35:50.923436 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:35:50 crc kubenswrapper[4910]: I0105 22:35:50.932294 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8ccds"]
Jan 05 22:35:51 crc kubenswrapper[4910]: I0105 22:35:51.046348 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604965f2-b157-4cca-8d3d-f8341f713d82-utilities\") pod \"redhat-marketplace-8ccds\" (UID: \"604965f2-b157-4cca-8d3d-f8341f713d82\") " pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:35:51 crc kubenswrapper[4910]: I0105 22:35:51.046482 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rn78m\" (UniqueName: \"kubernetes.io/projected/604965f2-b157-4cca-8d3d-f8341f713d82-kube-api-access-rn78m\") pod \"redhat-marketplace-8ccds\" (UID: \"604965f2-b157-4cca-8d3d-f8341f713d82\") " pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:35:51 crc kubenswrapper[4910]: I0105 22:35:51.046543 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604965f2-b157-4cca-8d3d-f8341f713d82-catalog-content\") pod \"redhat-marketplace-8ccds\" (UID: \"604965f2-b157-4cca-8d3d-f8341f713d82\") " pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:35:51 crc kubenswrapper[4910]: I0105 22:35:51.148367 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604965f2-b157-4cca-8d3d-f8341f713d82-utilities\") pod \"redhat-marketplace-8ccds\" (UID: \"604965f2-b157-4cca-8d3d-f8341f713d82\") " pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:35:51 crc kubenswrapper[4910]: I0105 22:35:51.148830 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rn78m\" (UniqueName: \"kubernetes.io/projected/604965f2-b157-4cca-8d3d-f8341f713d82-kube-api-access-rn78m\") pod \"redhat-marketplace-8ccds\" (UID: \"604965f2-b157-4cca-8d3d-f8341f713d82\") " pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:35:51 crc kubenswrapper[4910]: I0105 22:35:51.148954 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604965f2-b157-4cca-8d3d-f8341f713d82-catalog-content\") pod \"redhat-marketplace-8ccds\" (UID: \"604965f2-b157-4cca-8d3d-f8341f713d82\") " pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:35:51 crc kubenswrapper[4910]: I0105 22:35:51.148891 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604965f2-b157-4cca-8d3d-f8341f713d82-utilities\") pod \"redhat-marketplace-8ccds\" (UID: \"604965f2-b157-4cca-8d3d-f8341f713d82\") " pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:35:51 crc kubenswrapper[4910]: I0105 22:35:51.149284 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604965f2-b157-4cca-8d3d-f8341f713d82-catalog-content\") pod \"redhat-marketplace-8ccds\" (UID: \"604965f2-b157-4cca-8d3d-f8341f713d82\") " pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:35:51 crc kubenswrapper[4910]: I0105 22:35:51.171190 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rn78m\" (UniqueName: \"kubernetes.io/projected/604965f2-b157-4cca-8d3d-f8341f713d82-kube-api-access-rn78m\") pod \"redhat-marketplace-8ccds\" (UID: \"604965f2-b157-4cca-8d3d-f8341f713d82\") " pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:35:51 crc kubenswrapper[4910]: I0105 22:35:51.243814 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:35:51 crc kubenswrapper[4910]: I0105 22:35:51.684860 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8ccds"]
Jan 05 22:35:52 crc kubenswrapper[4910]: I0105 22:35:52.398398 4910 generic.go:334] "Generic (PLEG): container finished" podID="604965f2-b157-4cca-8d3d-f8341f713d82" containerID="15caf5a9e23df68f2c5eb89d557df6138886c3a3e4b9ef53bd2aef1cd043acb6" exitCode=0
Jan 05 22:35:52 crc kubenswrapper[4910]: I0105 22:35:52.398484 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8ccds" event={"ID":"604965f2-b157-4cca-8d3d-f8341f713d82","Type":"ContainerDied","Data":"15caf5a9e23df68f2c5eb89d557df6138886c3a3e4b9ef53bd2aef1cd043acb6"}
Jan 05 22:35:52 crc kubenswrapper[4910]: I0105 22:35:52.398550 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8ccds" event={"ID":"604965f2-b157-4cca-8d3d-f8341f713d82","Type":"ContainerStarted","Data":"64c4b9195e4cb4bcfd1d3ee700f4b5d840d218109e450f12367e665d75ad073e"}
Jan 05 22:35:52 crc kubenswrapper[4910]: I0105 22:35:52.400893 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 05 22:35:53 crc kubenswrapper[4910]: I0105 22:35:53.407960 4910 generic.go:334] "Generic (PLEG): container finished" podID="604965f2-b157-4cca-8d3d-f8341f713d82" containerID="57f5e47e6eebd760675eccafc71bfe08ecec41b58c4d733f57de6017b6491652" exitCode=0
Jan 05 22:35:53 crc kubenswrapper[4910]: I0105 22:35:53.408043 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8ccds" event={"ID":"604965f2-b157-4cca-8d3d-f8341f713d82","Type":"ContainerDied","Data":"57f5e47e6eebd760675eccafc71bfe08ecec41b58c4d733f57de6017b6491652"}
Jan 05 22:35:54 crc kubenswrapper[4910]: I0105 22:35:54.418651 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8ccds" event={"ID":"604965f2-b157-4cca-8d3d-f8341f713d82","Type":"ContainerStarted","Data":"ecbecc9de4fec461aba09b2fdb33274440572349fa30d5edd07446e4b6778a40"}
Jan 05 22:35:54 crc kubenswrapper[4910]: I0105 22:35:54.446232 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8ccds" podStartSLOduration=3.017566599 podStartE2EDuration="4.446210443s" podCreationTimestamp="2026-01-05 22:35:50 +0000 UTC" firstStartedPulling="2026-01-05 22:35:52.400594713 +0000 UTC m=+2683.978092393" lastFinishedPulling="2026-01-05 22:35:53.829238567 +0000 UTC m=+2685.406736237" observedRunningTime="2026-01-05 22:35:54.445399333 +0000 UTC m=+2686.022897013" watchObservedRunningTime="2026-01-05 22:35:54.446210443 +0000 UTC m=+2686.023708113"
Jan 05 22:35:55 crc kubenswrapper[4910]: I0105 22:35:55.722274 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9"
Jan 05 22:35:55 crc kubenswrapper[4910]: E0105 22:35:55.722862 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 22:36:01 crc kubenswrapper[4910]: I0105 22:36:01.244531 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:36:01 crc kubenswrapper[4910]: I0105 22:36:01.245094 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:36:01 crc kubenswrapper[4910]: I0105 22:36:01.303594 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:36:01 crc kubenswrapper[4910]: I0105 22:36:01.516500 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:36:01 crc kubenswrapper[4910]: I0105 22:36:01.561937 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8ccds"]
Jan 05 22:36:03 crc kubenswrapper[4910]: I0105 22:36:03.493218 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8ccds" podUID="604965f2-b157-4cca-8d3d-f8341f713d82" containerName="registry-server" containerID="cri-o://ecbecc9de4fec461aba09b2fdb33274440572349fa30d5edd07446e4b6778a40" gracePeriod=2
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.424069 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.490534 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rn78m\" (UniqueName: \"kubernetes.io/projected/604965f2-b157-4cca-8d3d-f8341f713d82-kube-api-access-rn78m\") pod \"604965f2-b157-4cca-8d3d-f8341f713d82\" (UID: \"604965f2-b157-4cca-8d3d-f8341f713d82\") "
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.490706 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604965f2-b157-4cca-8d3d-f8341f713d82-utilities\") pod \"604965f2-b157-4cca-8d3d-f8341f713d82\" (UID: \"604965f2-b157-4cca-8d3d-f8341f713d82\") "
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.490758 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604965f2-b157-4cca-8d3d-f8341f713d82-catalog-content\") pod \"604965f2-b157-4cca-8d3d-f8341f713d82\" (UID: \"604965f2-b157-4cca-8d3d-f8341f713d82\") "
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.491983 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/604965f2-b157-4cca-8d3d-f8341f713d82-utilities" (OuterVolumeSpecName: "utilities") pod "604965f2-b157-4cca-8d3d-f8341f713d82" (UID: "604965f2-b157-4cca-8d3d-f8341f713d82"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.500091 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/604965f2-b157-4cca-8d3d-f8341f713d82-kube-api-access-rn78m" (OuterVolumeSpecName: "kube-api-access-rn78m") pod "604965f2-b157-4cca-8d3d-f8341f713d82" (UID: "604965f2-b157-4cca-8d3d-f8341f713d82"). InnerVolumeSpecName "kube-api-access-rn78m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.500379 4910 generic.go:334] "Generic (PLEG): container finished" podID="604965f2-b157-4cca-8d3d-f8341f713d82" containerID="ecbecc9de4fec461aba09b2fdb33274440572349fa30d5edd07446e4b6778a40" exitCode=0
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.500438 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8ccds" event={"ID":"604965f2-b157-4cca-8d3d-f8341f713d82","Type":"ContainerDied","Data":"ecbecc9de4fec461aba09b2fdb33274440572349fa30d5edd07446e4b6778a40"}
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.500467 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8ccds" event={"ID":"604965f2-b157-4cca-8d3d-f8341f713d82","Type":"ContainerDied","Data":"64c4b9195e4cb4bcfd1d3ee700f4b5d840d218109e450f12367e665d75ad073e"}
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.500480 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8ccds"
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.500497 4910 scope.go:117] "RemoveContainer" containerID="ecbecc9de4fec461aba09b2fdb33274440572349fa30d5edd07446e4b6778a40"
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.512281 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/604965f2-b157-4cca-8d3d-f8341f713d82-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "604965f2-b157-4cca-8d3d-f8341f713d82" (UID: "604965f2-b157-4cca-8d3d-f8341f713d82"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.531069 4910 scope.go:117] "RemoveContainer" containerID="57f5e47e6eebd760675eccafc71bfe08ecec41b58c4d733f57de6017b6491652"
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.551023 4910 scope.go:117] "RemoveContainer" containerID="15caf5a9e23df68f2c5eb89d557df6138886c3a3e4b9ef53bd2aef1cd043acb6"
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.575831 4910 scope.go:117] "RemoveContainer" containerID="ecbecc9de4fec461aba09b2fdb33274440572349fa30d5edd07446e4b6778a40"
Jan 05 22:36:04 crc kubenswrapper[4910]: E0105 22:36:04.576688 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecbecc9de4fec461aba09b2fdb33274440572349fa30d5edd07446e4b6778a40\": container with ID starting with ecbecc9de4fec461aba09b2fdb33274440572349fa30d5edd07446e4b6778a40 not found: ID does not exist" containerID="ecbecc9de4fec461aba09b2fdb33274440572349fa30d5edd07446e4b6778a40"
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.576782 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecbecc9de4fec461aba09b2fdb33274440572349fa30d5edd07446e4b6778a40"} err="failed to get container status \"ecbecc9de4fec461aba09b2fdb33274440572349fa30d5edd07446e4b6778a40\": rpc error: code = NotFound desc = could not find container \"ecbecc9de4fec461aba09b2fdb33274440572349fa30d5edd07446e4b6778a40\": container with ID starting with ecbecc9de4fec461aba09b2fdb33274440572349fa30d5edd07446e4b6778a40 not found: ID does not exist"
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.576813 4910 scope.go:117] "RemoveContainer" containerID="57f5e47e6eebd760675eccafc71bfe08ecec41b58c4d733f57de6017b6491652"
Jan 05 22:36:04 crc kubenswrapper[4910]: E0105 22:36:04.577288 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57f5e47e6eebd760675eccafc71bfe08ecec41b58c4d733f57de6017b6491652\": container with ID starting with 57f5e47e6eebd760675eccafc71bfe08ecec41b58c4d733f57de6017b6491652 not found: ID does not exist" containerID="57f5e47e6eebd760675eccafc71bfe08ecec41b58c4d733f57de6017b6491652"
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.577328 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57f5e47e6eebd760675eccafc71bfe08ecec41b58c4d733f57de6017b6491652"} err="failed to get container status \"57f5e47e6eebd760675eccafc71bfe08ecec41b58c4d733f57de6017b6491652\": rpc error: code = NotFound desc = could not find container \"57f5e47e6eebd760675eccafc71bfe08ecec41b58c4d733f57de6017b6491652\": container with ID starting with 57f5e47e6eebd760675eccafc71bfe08ecec41b58c4d733f57de6017b6491652 not found: ID does not exist"
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.577353 4910 scope.go:117] "RemoveContainer" containerID="15caf5a9e23df68f2c5eb89d557df6138886c3a3e4b9ef53bd2aef1cd043acb6"
Jan 05 22:36:04 crc kubenswrapper[4910]: E0105 22:36:04.577630 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15caf5a9e23df68f2c5eb89d557df6138886c3a3e4b9ef53bd2aef1cd043acb6\": container with ID starting with 15caf5a9e23df68f2c5eb89d557df6138886c3a3e4b9ef53bd2aef1cd043acb6 not found: ID does not exist" containerID="15caf5a9e23df68f2c5eb89d557df6138886c3a3e4b9ef53bd2aef1cd043acb6"
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.577684 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15caf5a9e23df68f2c5eb89d557df6138886c3a3e4b9ef53bd2aef1cd043acb6"} err="failed to get container status \"15caf5a9e23df68f2c5eb89d557df6138886c3a3e4b9ef53bd2aef1cd043acb6\": rpc error: code = NotFound desc = could not find container \"15caf5a9e23df68f2c5eb89d557df6138886c3a3e4b9ef53bd2aef1cd043acb6\": container with ID starting with 15caf5a9e23df68f2c5eb89d557df6138886c3a3e4b9ef53bd2aef1cd043acb6 not found: ID does not exist"
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.592783 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604965f2-b157-4cca-8d3d-f8341f713d82-utilities\") on node \"crc\" DevicePath \"\""
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.592810 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604965f2-b157-4cca-8d3d-f8341f713d82-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.592823 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rn78m\" (UniqueName: \"kubernetes.io/projected/604965f2-b157-4cca-8d3d-f8341f713d82-kube-api-access-rn78m\") on node \"crc\" DevicePath \"\""
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.838065 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8ccds"]
Jan 05 22:36:04 crc kubenswrapper[4910]: I0105 22:36:04.849861 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8ccds"]
Jan 05 22:36:06 crc kubenswrapper[4910]: I0105 22:36:06.736918 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="604965f2-b157-4cca-8d3d-f8341f713d82" path="/var/lib/kubelet/pods/604965f2-b157-4cca-8d3d-f8341f713d82/volumes"
pod volumes dir" podUID="604965f2-b157-4cca-8d3d-f8341f713d82" path="/var/lib/kubelet/pods/604965f2-b157-4cca-8d3d-f8341f713d82/volumes" Jan 05 22:36:06 crc kubenswrapper[4910]: I0105 22:36:06.963794 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-f98gq"] Jan 05 22:36:06 crc kubenswrapper[4910]: E0105 22:36:06.964794 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="604965f2-b157-4cca-8d3d-f8341f713d82" containerName="registry-server" Jan 05 22:36:06 crc kubenswrapper[4910]: I0105 22:36:06.964825 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="604965f2-b157-4cca-8d3d-f8341f713d82" containerName="registry-server" Jan 05 22:36:06 crc kubenswrapper[4910]: E0105 22:36:06.964862 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="604965f2-b157-4cca-8d3d-f8341f713d82" containerName="extract-utilities" Jan 05 22:36:06 crc kubenswrapper[4910]: I0105 22:36:06.964878 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="604965f2-b157-4cca-8d3d-f8341f713d82" containerName="extract-utilities" Jan 05 22:36:06 crc kubenswrapper[4910]: E0105 22:36:06.964903 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="604965f2-b157-4cca-8d3d-f8341f713d82" containerName="extract-content" Jan 05 22:36:06 crc kubenswrapper[4910]: I0105 22:36:06.964916 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="604965f2-b157-4cca-8d3d-f8341f713d82" containerName="extract-content" Jan 05 22:36:06 crc kubenswrapper[4910]: I0105 22:36:06.965278 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="604965f2-b157-4cca-8d3d-f8341f713d82" containerName="registry-server" Jan 05 22:36:06 crc kubenswrapper[4910]: I0105 22:36:06.967809 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f98gq" Jan 05 22:36:06 crc kubenswrapper[4910]: I0105 22:36:06.969506 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-f98gq"] Jan 05 22:36:07 crc kubenswrapper[4910]: I0105 22:36:07.031501 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f84566eb-6d48-493a-9c2a-349c29c505f8-catalog-content\") pod \"redhat-operators-f98gq\" (UID: \"f84566eb-6d48-493a-9c2a-349c29c505f8\") " pod="openshift-marketplace/redhat-operators-f98gq" Jan 05 22:36:07 crc kubenswrapper[4910]: I0105 22:36:07.031850 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f84566eb-6d48-493a-9c2a-349c29c505f8-utilities\") pod \"redhat-operators-f98gq\" (UID: \"f84566eb-6d48-493a-9c2a-349c29c505f8\") " pod="openshift-marketplace/redhat-operators-f98gq" Jan 05 22:36:07 crc kubenswrapper[4910]: I0105 22:36:07.031940 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klsdt\" (UniqueName: \"kubernetes.io/projected/f84566eb-6d48-493a-9c2a-349c29c505f8-kube-api-access-klsdt\") pod \"redhat-operators-f98gq\" (UID: \"f84566eb-6d48-493a-9c2a-349c29c505f8\") " pod="openshift-marketplace/redhat-operators-f98gq" Jan 05 22:36:07 crc kubenswrapper[4910]: I0105 22:36:07.133548 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f84566eb-6d48-493a-9c2a-349c29c505f8-utilities\") pod \"redhat-operators-f98gq\" (UID: \"f84566eb-6d48-493a-9c2a-349c29c505f8\") " pod="openshift-marketplace/redhat-operators-f98gq" Jan 05 22:36:07 crc kubenswrapper[4910]: I0105 22:36:07.133648 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klsdt\" (UniqueName: \"kubernetes.io/projected/f84566eb-6d48-493a-9c2a-349c29c505f8-kube-api-access-klsdt\") pod \"redhat-operators-f98gq\" (UID: \"f84566eb-6d48-493a-9c2a-349c29c505f8\") " pod="openshift-marketplace/redhat-operators-f98gq" Jan 05 22:36:07 crc kubenswrapper[4910]: I0105 22:36:07.134180 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f84566eb-6d48-493a-9c2a-349c29c505f8-catalog-content\") pod \"redhat-operators-f98gq\" (UID: \"f84566eb-6d48-493a-9c2a-349c29c505f8\") " pod="openshift-marketplace/redhat-operators-f98gq" Jan 05 22:36:07 crc kubenswrapper[4910]: I0105 22:36:07.134248 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f84566eb-6d48-493a-9c2a-349c29c505f8-utilities\") pod \"redhat-operators-f98gq\" (UID: \"f84566eb-6d48-493a-9c2a-349c29c505f8\") " pod="openshift-marketplace/redhat-operators-f98gq" Jan 05 22:36:07 crc kubenswrapper[4910]: I0105 22:36:07.134606 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f84566eb-6d48-493a-9c2a-349c29c505f8-catalog-content\") pod \"redhat-operators-f98gq\" (UID: \"f84566eb-6d48-493a-9c2a-349c29c505f8\") " pod="openshift-marketplace/redhat-operators-f98gq" Jan 05 22:36:07 crc kubenswrapper[4910]: I0105 22:36:07.161829 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-klsdt\" (UniqueName: \"kubernetes.io/projected/f84566eb-6d48-493a-9c2a-349c29c505f8-kube-api-access-klsdt\") pod \"redhat-operators-f98gq\" (UID: \"f84566eb-6d48-493a-9c2a-349c29c505f8\") " pod="openshift-marketplace/redhat-operators-f98gq" Jan 05 22:36:07 crc kubenswrapper[4910]: I0105 22:36:07.298134 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f98gq" Jan 05 22:36:07 crc kubenswrapper[4910]: I0105 22:36:07.534317 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-f98gq"] Jan 05 22:36:08 crc kubenswrapper[4910]: I0105 22:36:08.713900 4910 generic.go:334] "Generic (PLEG): container finished" podID="f84566eb-6d48-493a-9c2a-349c29c505f8" containerID="936354c9f66b1263f1b12ce7c9c245c39fa26d3a1aac8f47b9e993f23e4df4af" exitCode=0 Jan 05 22:36:08 crc kubenswrapper[4910]: I0105 22:36:08.714040 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f98gq" event={"ID":"f84566eb-6d48-493a-9c2a-349c29c505f8","Type":"ContainerDied","Data":"936354c9f66b1263f1b12ce7c9c245c39fa26d3a1aac8f47b9e993f23e4df4af"} Jan 05 22:36:08 crc kubenswrapper[4910]: I0105 22:36:08.715898 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f98gq" event={"ID":"f84566eb-6d48-493a-9c2a-349c29c505f8","Type":"ContainerStarted","Data":"bf7bd8ab55d3b1781a06830ee5be2f6cf88156b66f37b7dc4723c3c3a3ab69e8"} Jan 05 22:36:09 crc kubenswrapper[4910]: I0105 22:36:09.723257 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:36:09 crc kubenswrapper[4910]: E0105 22:36:09.724080 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:36:10 crc kubenswrapper[4910]: I0105 22:36:10.757348 4910 generic.go:334] "Generic (PLEG): container finished" podID="f84566eb-6d48-493a-9c2a-349c29c505f8" containerID="d260d707f347d35eab6de718d3f9c37b4f2918cff683e64f3a8b3dd585030df0" exitCode=0 Jan 05 22:36:10 crc kubenswrapper[4910]: I0105 22:36:10.757411 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f98gq" event={"ID":"f84566eb-6d48-493a-9c2a-349c29c505f8","Type":"ContainerDied","Data":"d260d707f347d35eab6de718d3f9c37b4f2918cff683e64f3a8b3dd585030df0"} Jan 05 22:36:11 crc kubenswrapper[4910]: I0105 22:36:11.769520 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f98gq" event={"ID":"f84566eb-6d48-493a-9c2a-349c29c505f8","Type":"ContainerStarted","Data":"50b4c523246f0a6c966a2aec7557ce4bb7be383b2ae1787935bcd61b51583768"} Jan 05 22:36:11 crc kubenswrapper[4910]: I0105 22:36:11.793735 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-f98gq" podStartSLOduration=3.259799844 podStartE2EDuration="5.793716607s" podCreationTimestamp="2026-01-05 22:36:06 +0000 UTC" firstStartedPulling="2026-01-05 22:36:08.718435173 +0000 UTC m=+2700.295932883" lastFinishedPulling="2026-01-05 22:36:11.252351976 +0000 UTC m=+2702.829849646" 
Jan 05 22:36:17 crc kubenswrapper[4910]: I0105 22:36:17.298882 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-f98gq"
Jan 05 22:36:17 crc kubenswrapper[4910]: I0105 22:36:17.301576 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-f98gq"
Jan 05 22:36:17 crc kubenswrapper[4910]: I0105 22:36:17.356908 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-f98gq"
Jan 05 22:36:17 crc kubenswrapper[4910]: I0105 22:36:17.873152 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-f98gq"
Jan 05 22:36:17 crc kubenswrapper[4910]: I0105 22:36:17.931921 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-f98gq"]
Jan 05 22:36:19 crc kubenswrapper[4910]: I0105 22:36:19.838640 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-f98gq" podUID="f84566eb-6d48-493a-9c2a-349c29c505f8" containerName="registry-server" containerID="cri-o://50b4c523246f0a6c966a2aec7557ce4bb7be383b2ae1787935bcd61b51583768" gracePeriod=2
Jan 05 22:36:22 crc kubenswrapper[4910]: I0105 22:36:22.864878 4910 generic.go:334] "Generic (PLEG): container finished" podID="f84566eb-6d48-493a-9c2a-349c29c505f8" containerID="50b4c523246f0a6c966a2aec7557ce4bb7be383b2ae1787935bcd61b51583768" exitCode=0
Jan 05 22:36:22 crc kubenswrapper[4910]: I0105 22:36:22.865268 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f98gq" event={"ID":"f84566eb-6d48-493a-9c2a-349c29c505f8","Type":"ContainerDied","Data":"50b4c523246f0a6c966a2aec7557ce4bb7be383b2ae1787935bcd61b51583768"}
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.266469 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f98gq"
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.384625 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klsdt\" (UniqueName: \"kubernetes.io/projected/f84566eb-6d48-493a-9c2a-349c29c505f8-kube-api-access-klsdt\") pod \"f84566eb-6d48-493a-9c2a-349c29c505f8\" (UID: \"f84566eb-6d48-493a-9c2a-349c29c505f8\") "
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.384770 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f84566eb-6d48-493a-9c2a-349c29c505f8-catalog-content\") pod \"f84566eb-6d48-493a-9c2a-349c29c505f8\" (UID: \"f84566eb-6d48-493a-9c2a-349c29c505f8\") "
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.384939 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f84566eb-6d48-493a-9c2a-349c29c505f8-utilities\") pod \"f84566eb-6d48-493a-9c2a-349c29c505f8\" (UID: \"f84566eb-6d48-493a-9c2a-349c29c505f8\") "
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.385999 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f84566eb-6d48-493a-9c2a-349c29c505f8-utilities" (OuterVolumeSpecName: "utilities") pod "f84566eb-6d48-493a-9c2a-349c29c505f8" (UID: "f84566eb-6d48-493a-9c2a-349c29c505f8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.392328 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f84566eb-6d48-493a-9c2a-349c29c505f8-kube-api-access-klsdt" (OuterVolumeSpecName: "kube-api-access-klsdt") pod "f84566eb-6d48-493a-9c2a-349c29c505f8" (UID: "f84566eb-6d48-493a-9c2a-349c29c505f8"). InnerVolumeSpecName "kube-api-access-klsdt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.487056 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f84566eb-6d48-493a-9c2a-349c29c505f8-utilities\") on node \"crc\" DevicePath \"\""
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.487101 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klsdt\" (UniqueName: \"kubernetes.io/projected/f84566eb-6d48-493a-9c2a-349c29c505f8-kube-api-access-klsdt\") on node \"crc\" DevicePath \"\""
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.526070 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f84566eb-6d48-493a-9c2a-349c29c505f8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f84566eb-6d48-493a-9c2a-349c29c505f8" (UID: "f84566eb-6d48-493a-9c2a-349c29c505f8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.589255 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f84566eb-6d48-493a-9c2a-349c29c505f8-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.874580 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f98gq" event={"ID":"f84566eb-6d48-493a-9c2a-349c29c505f8","Type":"ContainerDied","Data":"bf7bd8ab55d3b1781a06830ee5be2f6cf88156b66f37b7dc4723c3c3a3ab69e8"}
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.874655 4910 scope.go:117] "RemoveContainer" containerID="50b4c523246f0a6c966a2aec7557ce4bb7be383b2ae1787935bcd61b51583768"
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.874636 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f98gq"
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.906341 4910 scope.go:117] "RemoveContainer" containerID="d260d707f347d35eab6de718d3f9c37b4f2918cff683e64f3a8b3dd585030df0"
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.921223 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-f98gq"]
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.937666 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-f98gq"]
Jan 05 22:36:23 crc kubenswrapper[4910]: I0105 22:36:23.947322 4910 scope.go:117] "RemoveContainer" containerID="936354c9f66b1263f1b12ce7c9c245c39fa26d3a1aac8f47b9e993f23e4df4af"
Jan 05 22:36:24 crc kubenswrapper[4910]: I0105 22:36:24.722176 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9"
Jan 05 22:36:24 crc kubenswrapper[4910]: I0105 22:36:24.729521 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f84566eb-6d48-493a-9c2a-349c29c505f8" path="/var/lib/kubelet/pods/f84566eb-6d48-493a-9c2a-349c29c505f8/volumes"
Jan 05 22:36:25 crc kubenswrapper[4910]: I0105 22:36:25.889990 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"eb350b487bd18c2ab5f2ccaf6cb00d7658aebb5a5c6834dc939a14ab26869bf0"}
Jan 05 22:38:40 crc kubenswrapper[4910]: I0105 22:38:40.953114 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 05 22:38:40 crc kubenswrapper[4910]: I0105 22:38:40.954060 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 05 22:39:10 crc kubenswrapper[4910]: I0105 22:39:10.956005 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 05 22:39:10 crc kubenswrapper[4910]: I0105 22:39:10.956770 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.172254 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vxlr7"]
Jan 05 22:39:12 crc kubenswrapper[4910]: E0105 22:39:12.172725 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f84566eb-6d48-493a-9c2a-349c29c505f8" containerName="extract-content"
Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.172748 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f84566eb-6d48-493a-9c2a-349c29c505f8" containerName="extract-content"
Jan 05 22:39:12 crc kubenswrapper[4910]: E0105 22:39:12.172766 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f84566eb-6d48-493a-9c2a-349c29c505f8" containerName="extract-utilities"
Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.172777 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f84566eb-6d48-493a-9c2a-349c29c505f8" containerName="extract-utilities"
Jan 05 22:39:12 crc kubenswrapper[4910]: E0105 22:39:12.172794 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f84566eb-6d48-493a-9c2a-349c29c505f8" containerName="registry-server"
Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.172805 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f84566eb-6d48-493a-9c2a-349c29c505f8" containerName="registry-server"
Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.173043 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f84566eb-6d48-493a-9c2a-349c29c505f8" containerName="registry-server"
Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.176824 4910 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.202793 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vxlr7"] Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.276792 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-catalog-content\") pod \"certified-operators-vxlr7\" (UID: \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\") " pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.276868 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-utilities\") pod \"certified-operators-vxlr7\" (UID: \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\") " pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.276889 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrxsr\" (UniqueName: \"kubernetes.io/projected/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-kube-api-access-rrxsr\") pod \"certified-operators-vxlr7\" (UID: \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\") " pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.377797 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-utilities\") pod \"certified-operators-vxlr7\" (UID: \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\") " pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.377842 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrxsr\" (UniqueName: \"kubernetes.io/projected/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-kube-api-access-rrxsr\") pod \"certified-operators-vxlr7\" (UID: \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\") " pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.377932 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-catalog-content\") pod \"certified-operators-vxlr7\" (UID: \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\") " pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.378425 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-utilities\") pod \"certified-operators-vxlr7\" (UID: \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\") " pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.378502 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-catalog-content\") pod \"certified-operators-vxlr7\" (UID: \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\") " pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.404222 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rrxsr\" (UniqueName: \"kubernetes.io/projected/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-kube-api-access-rrxsr\") pod \"certified-operators-vxlr7\" (UID: \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\") " pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.504055 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:12 crc kubenswrapper[4910]: I0105 22:39:12.963777 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vxlr7"] Jan 05 22:39:13 crc kubenswrapper[4910]: I0105 22:39:13.273604 4910 generic.go:334] "Generic (PLEG): container finished" podID="a6afeea5-b246-4ff7-ba44-2b0f17c7f063" containerID="82e1c9919549993c726a361570bd60294bc146558cd4dfee0c191f4306fc44a6" exitCode=0 Jan 05 22:39:13 crc kubenswrapper[4910]: I0105 22:39:13.273646 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vxlr7" event={"ID":"a6afeea5-b246-4ff7-ba44-2b0f17c7f063","Type":"ContainerDied","Data":"82e1c9919549993c726a361570bd60294bc146558cd4dfee0c191f4306fc44a6"} Jan 05 22:39:13 crc kubenswrapper[4910]: I0105 22:39:13.273674 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vxlr7" event={"ID":"a6afeea5-b246-4ff7-ba44-2b0f17c7f063","Type":"ContainerStarted","Data":"31da604c1450277ebc4916f006ff8533cea4751b6b98b5264edcc9c1c9afafff"} Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.167064 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dbf9r"] Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.171935 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.182556 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dbf9r"] Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.210543 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjgwn\" (UniqueName: \"kubernetes.io/projected/43d815cb-9ec9-4523-acdf-ad9f6d60650c-kube-api-access-sjgwn\") pod \"community-operators-dbf9r\" (UID: \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\") " pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.210687 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43d815cb-9ec9-4523-acdf-ad9f6d60650c-catalog-content\") pod \"community-operators-dbf9r\" (UID: \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\") " pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.210929 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43d815cb-9ec9-4523-acdf-ad9f6d60650c-utilities\") pod \"community-operators-dbf9r\" (UID: \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\") " pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.312278 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjgwn\" (UniqueName: \"kubernetes.io/projected/43d815cb-9ec9-4523-acdf-ad9f6d60650c-kube-api-access-sjgwn\") pod \"community-operators-dbf9r\" (UID: \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\") " pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.312325 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43d815cb-9ec9-4523-acdf-ad9f6d60650c-catalog-content\") pod \"community-operators-dbf9r\" (UID: \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\") " pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.312388 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43d815cb-9ec9-4523-acdf-ad9f6d60650c-utilities\") pod \"community-operators-dbf9r\" (UID: \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\") " pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.312841 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43d815cb-9ec9-4523-acdf-ad9f6d60650c-catalog-content\") pod \"community-operators-dbf9r\" (UID: \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\") " pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.312923 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43d815cb-9ec9-4523-acdf-ad9f6d60650c-utilities\") pod \"community-operators-dbf9r\" (UID: \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\") " pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.344712 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-sjgwn\" (UniqueName: \"kubernetes.io/projected/43d815cb-9ec9-4523-acdf-ad9f6d60650c-kube-api-access-sjgwn\") pod \"community-operators-dbf9r\" (UID: \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\") " pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.495344 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:14 crc kubenswrapper[4910]: I0105 22:39:14.787605 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dbf9r"] Jan 05 22:39:15 crc kubenswrapper[4910]: I0105 22:39:15.289678 4910 generic.go:334] "Generic (PLEG): container finished" podID="43d815cb-9ec9-4523-acdf-ad9f6d60650c" containerID="c4c70f6a6ef53fa5a25c6cafc2a45f8b1612998b0708a6b7351f1f96dfd6d07c" exitCode=0 Jan 05 22:39:15 crc kubenswrapper[4910]: I0105 22:39:15.289775 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dbf9r" event={"ID":"43d815cb-9ec9-4523-acdf-ad9f6d60650c","Type":"ContainerDied","Data":"c4c70f6a6ef53fa5a25c6cafc2a45f8b1612998b0708a6b7351f1f96dfd6d07c"} Jan 05 22:39:15 crc kubenswrapper[4910]: I0105 22:39:15.290042 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dbf9r" event={"ID":"43d815cb-9ec9-4523-acdf-ad9f6d60650c","Type":"ContainerStarted","Data":"20982779ff59b4036a41700e44dd87f9015633eac5c80a717d583c68c3f77453"} Jan 05 22:39:15 crc kubenswrapper[4910]: I0105 22:39:15.293771 4910 generic.go:334] "Generic (PLEG): container finished" podID="a6afeea5-b246-4ff7-ba44-2b0f17c7f063" containerID="22d40c07f3eae4cad5d8b1b6503f2fcb8c6e3e8c25e9a89ea09d3cabdec2a4c6" exitCode=0 Jan 05 22:39:15 crc kubenswrapper[4910]: I0105 22:39:15.293814 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vxlr7" event={"ID":"a6afeea5-b246-4ff7-ba44-2b0f17c7f063","Type":"ContainerDied","Data":"22d40c07f3eae4cad5d8b1b6503f2fcb8c6e3e8c25e9a89ea09d3cabdec2a4c6"} Jan 05 22:39:16 crc kubenswrapper[4910]: I0105 22:39:16.318215 4910 generic.go:334] "Generic (PLEG): container finished" podID="43d815cb-9ec9-4523-acdf-ad9f6d60650c" containerID="3c20e827c1b9a11f3d323f1e30886ea52e3a164df2a0a90535084d0207b2337c" exitCode=0 Jan 05 22:39:16 crc kubenswrapper[4910]: I0105 22:39:16.318323 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dbf9r" event={"ID":"43d815cb-9ec9-4523-acdf-ad9f6d60650c","Type":"ContainerDied","Data":"3c20e827c1b9a11f3d323f1e30886ea52e3a164df2a0a90535084d0207b2337c"} Jan 05 22:39:16 crc kubenswrapper[4910]: I0105 22:39:16.321361 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vxlr7" event={"ID":"a6afeea5-b246-4ff7-ba44-2b0f17c7f063","Type":"ContainerStarted","Data":"19a2bfe76cc46923485313c10a7f6e7b1c06d9c846904e2d47583622ee7f8418"} Jan 05 22:39:16 crc kubenswrapper[4910]: I0105 22:39:16.358147 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vxlr7" podStartSLOduration=1.7963641529999999 podStartE2EDuration="4.358111423s" podCreationTimestamp="2026-01-05 22:39:12 +0000 UTC" firstStartedPulling="2026-01-05 22:39:13.275505756 +0000 UTC m=+2884.853003436" lastFinishedPulling="2026-01-05 22:39:15.837253036 +0000 UTC m=+2887.414750706" 
observedRunningTime="2026-01-05 22:39:16.357682683 +0000 UTC m=+2887.935180353" watchObservedRunningTime="2026-01-05 22:39:16.358111423 +0000 UTC m=+2887.935609093" Jan 05 22:39:17 crc kubenswrapper[4910]: I0105 22:39:17.331398 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dbf9r" event={"ID":"43d815cb-9ec9-4523-acdf-ad9f6d60650c","Type":"ContainerStarted","Data":"faf791c9b1f4e611caa5b2322b439f5a68056caad1334310022defbce95231af"} Jan 05 22:39:17 crc kubenswrapper[4910]: I0105 22:39:17.355347 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dbf9r" podStartSLOduration=1.767774394 podStartE2EDuration="3.355324753s" podCreationTimestamp="2026-01-05 22:39:14 +0000 UTC" firstStartedPulling="2026-01-05 22:39:15.292882749 +0000 UTC m=+2886.870380419" lastFinishedPulling="2026-01-05 22:39:16.880433098 +0000 UTC m=+2888.457930778" observedRunningTime="2026-01-05 22:39:17.352155033 +0000 UTC m=+2888.929652703" watchObservedRunningTime="2026-01-05 22:39:17.355324753 +0000 UTC m=+2888.932822423" Jan 05 22:39:22 crc kubenswrapper[4910]: I0105 22:39:22.505072 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:22 crc kubenswrapper[4910]: I0105 22:39:22.506908 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:22 crc kubenswrapper[4910]: I0105 22:39:22.582346 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:23 crc kubenswrapper[4910]: I0105 22:39:23.425629 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:23 crc kubenswrapper[4910]: I0105 22:39:23.470059 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vxlr7"] Jan 05 22:39:24 crc kubenswrapper[4910]: I0105 22:39:24.496303 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:24 crc kubenswrapper[4910]: I0105 22:39:24.496359 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:24 crc kubenswrapper[4910]: I0105 22:39:24.571250 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:25 crc kubenswrapper[4910]: I0105 22:39:25.395149 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vxlr7" podUID="a6afeea5-b246-4ff7-ba44-2b0f17c7f063" containerName="registry-server" containerID="cri-o://19a2bfe76cc46923485313c10a7f6e7b1c06d9c846904e2d47583622ee7f8418" gracePeriod=2 Jan 05 22:39:25 crc kubenswrapper[4910]: I0105 22:39:25.444217 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:26 crc kubenswrapper[4910]: I0105 22:39:26.213888 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dbf9r"] Jan 05 22:39:27 crc kubenswrapper[4910]: I0105 22:39:27.410621 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dbf9r" 
podUID="43d815cb-9ec9-4523-acdf-ad9f6d60650c" containerName="registry-server" containerID="cri-o://faf791c9b1f4e611caa5b2322b439f5a68056caad1334310022defbce95231af" gracePeriod=2 Jan 05 22:39:28 crc kubenswrapper[4910]: I0105 22:39:28.422881 4910 generic.go:334] "Generic (PLEG): container finished" podID="43d815cb-9ec9-4523-acdf-ad9f6d60650c" containerID="faf791c9b1f4e611caa5b2322b439f5a68056caad1334310022defbce95231af" exitCode=0 Jan 05 22:39:28 crc kubenswrapper[4910]: I0105 22:39:28.422990 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dbf9r" event={"ID":"43d815cb-9ec9-4523-acdf-ad9f6d60650c","Type":"ContainerDied","Data":"faf791c9b1f4e611caa5b2322b439f5a68056caad1334310022defbce95231af"} Jan 05 22:39:28 crc kubenswrapper[4910]: I0105 22:39:28.428533 4910 generic.go:334] "Generic (PLEG): container finished" podID="a6afeea5-b246-4ff7-ba44-2b0f17c7f063" containerID="19a2bfe76cc46923485313c10a7f6e7b1c06d9c846904e2d47583622ee7f8418" exitCode=0 Jan 05 22:39:28 crc kubenswrapper[4910]: I0105 22:39:28.428588 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vxlr7" event={"ID":"a6afeea5-b246-4ff7-ba44-2b0f17c7f063","Type":"ContainerDied","Data":"19a2bfe76cc46923485313c10a7f6e7b1c06d9c846904e2d47583622ee7f8418"} Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.209334 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.346747 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43d815cb-9ec9-4523-acdf-ad9f6d60650c-catalog-content\") pod \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\" (UID: \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\") " Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.346811 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjgwn\" (UniqueName: \"kubernetes.io/projected/43d815cb-9ec9-4523-acdf-ad9f6d60650c-kube-api-access-sjgwn\") pod \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\" (UID: \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\") " Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.346849 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43d815cb-9ec9-4523-acdf-ad9f6d60650c-utilities\") pod \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\" (UID: \"43d815cb-9ec9-4523-acdf-ad9f6d60650c\") " Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.349194 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43d815cb-9ec9-4523-acdf-ad9f6d60650c-utilities" (OuterVolumeSpecName: "utilities") pod "43d815cb-9ec9-4523-acdf-ad9f6d60650c" (UID: "43d815cb-9ec9-4523-acdf-ad9f6d60650c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.359315 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43d815cb-9ec9-4523-acdf-ad9f6d60650c-kube-api-access-sjgwn" (OuterVolumeSpecName: "kube-api-access-sjgwn") pod "43d815cb-9ec9-4523-acdf-ad9f6d60650c" (UID: "43d815cb-9ec9-4523-acdf-ad9f6d60650c"). InnerVolumeSpecName "kube-api-access-sjgwn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.422799 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43d815cb-9ec9-4523-acdf-ad9f6d60650c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "43d815cb-9ec9-4523-acdf-ad9f6d60650c" (UID: "43d815cb-9ec9-4523-acdf-ad9f6d60650c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.436765 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dbf9r" event={"ID":"43d815cb-9ec9-4523-acdf-ad9f6d60650c","Type":"ContainerDied","Data":"20982779ff59b4036a41700e44dd87f9015633eac5c80a717d583c68c3f77453"} Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.436827 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dbf9r" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.437974 4910 scope.go:117] "RemoveContainer" containerID="faf791c9b1f4e611caa5b2322b439f5a68056caad1334310022defbce95231af" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.452531 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43d815cb-9ec9-4523-acdf-ad9f6d60650c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.452567 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjgwn\" (UniqueName: \"kubernetes.io/projected/43d815cb-9ec9-4523-acdf-ad9f6d60650c-kube-api-access-sjgwn\") on node \"crc\" DevicePath \"\"" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.452579 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43d815cb-9ec9-4523-acdf-ad9f6d60650c-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.470879 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dbf9r"] Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.477243 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dbf9r"] Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.483944 4910 scope.go:117] "RemoveContainer" containerID="3c20e827c1b9a11f3d323f1e30886ea52e3a164df2a0a90535084d0207b2337c" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.525785 4910 scope.go:117] "RemoveContainer" containerID="c4c70f6a6ef53fa5a25c6cafc2a45f8b1612998b0708a6b7351f1f96dfd6d07c" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.655829 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.755941 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-utilities\") pod \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\" (UID: \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\") " Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.756134 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrxsr\" (UniqueName: \"kubernetes.io/projected/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-kube-api-access-rrxsr\") pod \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\" (UID: \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\") " Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.756185 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-catalog-content\") pod \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\" (UID: \"a6afeea5-b246-4ff7-ba44-2b0f17c7f063\") " Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.756837 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-utilities" (OuterVolumeSpecName: "utilities") pod "a6afeea5-b246-4ff7-ba44-2b0f17c7f063" (UID: "a6afeea5-b246-4ff7-ba44-2b0f17c7f063"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.759579 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-kube-api-access-rrxsr" (OuterVolumeSpecName: "kube-api-access-rrxsr") pod "a6afeea5-b246-4ff7-ba44-2b0f17c7f063" (UID: "a6afeea5-b246-4ff7-ba44-2b0f17c7f063"). InnerVolumeSpecName "kube-api-access-rrxsr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.802019 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a6afeea5-b246-4ff7-ba44-2b0f17c7f063" (UID: "a6afeea5-b246-4ff7-ba44-2b0f17c7f063"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.857678 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrxsr\" (UniqueName: \"kubernetes.io/projected/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-kube-api-access-rrxsr\") on node \"crc\" DevicePath \"\"" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.857714 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:39:29 crc kubenswrapper[4910]: I0105 22:39:29.857724 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6afeea5-b246-4ff7-ba44-2b0f17c7f063-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:39:30 crc kubenswrapper[4910]: I0105 22:39:30.450584 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vxlr7" event={"ID":"a6afeea5-b246-4ff7-ba44-2b0f17c7f063","Type":"ContainerDied","Data":"31da604c1450277ebc4916f006ff8533cea4751b6b98b5264edcc9c1c9afafff"} Jan 05 22:39:30 crc kubenswrapper[4910]: I0105 22:39:30.450687 4910 scope.go:117] "RemoveContainer" containerID="19a2bfe76cc46923485313c10a7f6e7b1c06d9c846904e2d47583622ee7f8418" Jan 05 22:39:30 crc kubenswrapper[4910]: I0105 22:39:30.450686 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vxlr7" Jan 05 22:39:30 crc kubenswrapper[4910]: I0105 22:39:30.473648 4910 scope.go:117] "RemoveContainer" containerID="22d40c07f3eae4cad5d8b1b6503f2fcb8c6e3e8c25e9a89ea09d3cabdec2a4c6" Jan 05 22:39:30 crc kubenswrapper[4910]: I0105 22:39:30.494653 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vxlr7"] Jan 05 22:39:30 crc kubenswrapper[4910]: I0105 22:39:30.499583 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vxlr7"] Jan 05 22:39:30 crc kubenswrapper[4910]: I0105 22:39:30.515234 4910 scope.go:117] "RemoveContainer" containerID="82e1c9919549993c726a361570bd60294bc146558cd4dfee0c191f4306fc44a6" Jan 05 22:39:30 crc kubenswrapper[4910]: I0105 22:39:30.728898 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43d815cb-9ec9-4523-acdf-ad9f6d60650c" path="/var/lib/kubelet/pods/43d815cb-9ec9-4523-acdf-ad9f6d60650c/volumes" Jan 05 22:39:30 crc kubenswrapper[4910]: I0105 22:39:30.729561 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6afeea5-b246-4ff7-ba44-2b0f17c7f063" path="/var/lib/kubelet/pods/a6afeea5-b246-4ff7-ba44-2b0f17c7f063/volumes" Jan 05 22:39:40 crc kubenswrapper[4910]: I0105 22:39:40.952780 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:39:40 crc kubenswrapper[4910]: I0105 22:39:40.953712 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:39:40 crc 
kubenswrapper[4910]: I0105 22:39:40.953797 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 22:39:40 crc kubenswrapper[4910]: I0105 22:39:40.954717 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"eb350b487bd18c2ab5f2ccaf6cb00d7658aebb5a5c6834dc939a14ab26869bf0"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 22:39:40 crc kubenswrapper[4910]: I0105 22:39:40.954791 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://eb350b487bd18c2ab5f2ccaf6cb00d7658aebb5a5c6834dc939a14ab26869bf0" gracePeriod=600 Jan 05 22:39:41 crc kubenswrapper[4910]: I0105 22:39:41.535718 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="eb350b487bd18c2ab5f2ccaf6cb00d7658aebb5a5c6834dc939a14ab26869bf0" exitCode=0 Jan 05 22:39:41 crc kubenswrapper[4910]: I0105 22:39:41.536077 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"eb350b487bd18c2ab5f2ccaf6cb00d7658aebb5a5c6834dc939a14ab26869bf0"} Jan 05 22:39:41 crc kubenswrapper[4910]: I0105 22:39:41.536108 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b"} Jan 05 22:39:41 crc kubenswrapper[4910]: I0105 22:39:41.536138 4910 scope.go:117] "RemoveContainer" containerID="ecbcf471a393ef5c3d36665028d77090957964738e248ad9f27c971d5ab99cd9" Jan 05 22:42:10 crc kubenswrapper[4910]: I0105 22:42:10.952855 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:42:10 crc kubenswrapper[4910]: I0105 22:42:10.953839 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:42:40 crc kubenswrapper[4910]: I0105 22:42:40.952347 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:42:40 crc kubenswrapper[4910]: I0105 22:42:40.953481 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:43:10 crc kubenswrapper[4910]: I0105 22:43:10.952655 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:43:10 crc kubenswrapper[4910]: I0105 22:43:10.953493 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:43:10 crc kubenswrapper[4910]: I0105 22:43:10.953544 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 22:43:10 crc kubenswrapper[4910]: I0105 22:43:10.954218 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 22:43:10 crc kubenswrapper[4910]: I0105 22:43:10.954277 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" gracePeriod=600 Jan 05 22:43:11 crc kubenswrapper[4910]: E0105 22:43:11.083929 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:43:11 crc kubenswrapper[4910]: I0105 22:43:11.437776 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" exitCode=0 Jan 05 22:43:11 crc kubenswrapper[4910]: I0105 22:43:11.437855 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b"} Jan 05 22:43:11 crc kubenswrapper[4910]: I0105 22:43:11.437924 4910 scope.go:117] "RemoveContainer" containerID="eb350b487bd18c2ab5f2ccaf6cb00d7658aebb5a5c6834dc939a14ab26869bf0" Jan 05 22:43:11 crc kubenswrapper[4910]: I0105 22:43:11.439081 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:43:11 crc kubenswrapper[4910]: E0105 22:43:11.439563 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:43:22 crc kubenswrapper[4910]: I0105 22:43:22.722425 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:43:22 crc kubenswrapper[4910]: E0105 22:43:22.723627 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:43:34 crc kubenswrapper[4910]: I0105 22:43:34.722960 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:43:34 crc kubenswrapper[4910]: E0105 22:43:34.724285 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:43:49 crc kubenswrapper[4910]: I0105 22:43:49.722204 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:43:49 crc kubenswrapper[4910]: E0105 22:43:49.723495 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:44:00 crc kubenswrapper[4910]: I0105 22:44:00.722680 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:44:00 crc kubenswrapper[4910]: E0105 22:44:00.723919 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:44:15 crc kubenswrapper[4910]: I0105 22:44:15.721308 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:44:15 crc kubenswrapper[4910]: E0105 22:44:15.721984 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:44:27 crc kubenswrapper[4910]: I0105 22:44:27.721761 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:44:27 crc kubenswrapper[4910]: E0105 22:44:27.723797 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:44:40 crc kubenswrapper[4910]: I0105 22:44:40.723875 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:44:40 crc kubenswrapper[4910]: E0105 22:44:40.724851 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:44:53 crc kubenswrapper[4910]: I0105 22:44:53.722660 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:44:53 crc kubenswrapper[4910]: E0105 22:44:53.723619 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.144882 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s"] Jan 05 22:45:00 crc kubenswrapper[4910]: E0105 22:45:00.145764 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6afeea5-b246-4ff7-ba44-2b0f17c7f063" containerName="extract-content" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.145784 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6afeea5-b246-4ff7-ba44-2b0f17c7f063" containerName="extract-content" Jan 05 22:45:00 crc kubenswrapper[4910]: E0105 22:45:00.145805 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43d815cb-9ec9-4523-acdf-ad9f6d60650c" containerName="extract-utilities" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.145813 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="43d815cb-9ec9-4523-acdf-ad9f6d60650c" containerName="extract-utilities" Jan 05 22:45:00 crc kubenswrapper[4910]: E0105 22:45:00.145836 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6afeea5-b246-4ff7-ba44-2b0f17c7f063" containerName="extract-utilities" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.145844 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6afeea5-b246-4ff7-ba44-2b0f17c7f063" containerName="extract-utilities" Jan 05 22:45:00 crc 
kubenswrapper[4910]: E0105 22:45:00.145861 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6afeea5-b246-4ff7-ba44-2b0f17c7f063" containerName="registry-server" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.145868 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6afeea5-b246-4ff7-ba44-2b0f17c7f063" containerName="registry-server" Jan 05 22:45:00 crc kubenswrapper[4910]: E0105 22:45:00.145894 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43d815cb-9ec9-4523-acdf-ad9f6d60650c" containerName="extract-content" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.145903 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="43d815cb-9ec9-4523-acdf-ad9f6d60650c" containerName="extract-content" Jan 05 22:45:00 crc kubenswrapper[4910]: E0105 22:45:00.145911 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43d815cb-9ec9-4523-acdf-ad9f6d60650c" containerName="registry-server" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.145918 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="43d815cb-9ec9-4523-acdf-ad9f6d60650c" containerName="registry-server" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.146313 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="43d815cb-9ec9-4523-acdf-ad9f6d60650c" containerName="registry-server" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.146342 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6afeea5-b246-4ff7-ba44-2b0f17c7f063" containerName="registry-server" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.147040 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.151898 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.152186 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.158197 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s"] Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.272514 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5da3b211-d6bb-4b8b-8918-21d631902c74-secret-volume\") pod \"collect-profiles-29460885-zfm2s\" (UID: \"5da3b211-d6bb-4b8b-8918-21d631902c74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.272578 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vthqq\" (UniqueName: \"kubernetes.io/projected/5da3b211-d6bb-4b8b-8918-21d631902c74-kube-api-access-vthqq\") pod \"collect-profiles-29460885-zfm2s\" (UID: \"5da3b211-d6bb-4b8b-8918-21d631902c74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.272610 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5da3b211-d6bb-4b8b-8918-21d631902c74-config-volume\") 
pod \"collect-profiles-29460885-zfm2s\" (UID: \"5da3b211-d6bb-4b8b-8918-21d631902c74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.373618 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5da3b211-d6bb-4b8b-8918-21d631902c74-secret-volume\") pod \"collect-profiles-29460885-zfm2s\" (UID: \"5da3b211-d6bb-4b8b-8918-21d631902c74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.373669 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vthqq\" (UniqueName: \"kubernetes.io/projected/5da3b211-d6bb-4b8b-8918-21d631902c74-kube-api-access-vthqq\") pod \"collect-profiles-29460885-zfm2s\" (UID: \"5da3b211-d6bb-4b8b-8918-21d631902c74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.373692 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5da3b211-d6bb-4b8b-8918-21d631902c74-config-volume\") pod \"collect-profiles-29460885-zfm2s\" (UID: \"5da3b211-d6bb-4b8b-8918-21d631902c74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.374762 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5da3b211-d6bb-4b8b-8918-21d631902c74-config-volume\") pod \"collect-profiles-29460885-zfm2s\" (UID: \"5da3b211-d6bb-4b8b-8918-21d631902c74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.382250 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5da3b211-d6bb-4b8b-8918-21d631902c74-secret-volume\") pod \"collect-profiles-29460885-zfm2s\" (UID: \"5da3b211-d6bb-4b8b-8918-21d631902c74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.395569 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vthqq\" (UniqueName: \"kubernetes.io/projected/5da3b211-d6bb-4b8b-8918-21d631902c74-kube-api-access-vthqq\") pod \"collect-profiles-29460885-zfm2s\" (UID: \"5da3b211-d6bb-4b8b-8918-21d631902c74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.474796 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" Jan 05 22:45:00 crc kubenswrapper[4910]: I0105 22:45:00.975648 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s"] Jan 05 22:45:01 crc kubenswrapper[4910]: I0105 22:45:01.477666 4910 generic.go:334] "Generic (PLEG): container finished" podID="5da3b211-d6bb-4b8b-8918-21d631902c74" containerID="f7896415c95fe8b75b3bb8f093c88e6065975dc0d26cc7bb7680a45e569b8209" exitCode=0 Jan 05 22:45:01 crc kubenswrapper[4910]: I0105 22:45:01.477918 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" event={"ID":"5da3b211-d6bb-4b8b-8918-21d631902c74","Type":"ContainerDied","Data":"f7896415c95fe8b75b3bb8f093c88e6065975dc0d26cc7bb7680a45e569b8209"} Jan 05 22:45:01 crc kubenswrapper[4910]: I0105 22:45:01.478147 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" event={"ID":"5da3b211-d6bb-4b8b-8918-21d631902c74","Type":"ContainerStarted","Data":"0817ec1505e6fec7769ce7225c58469480ef8a85d07d2cb3a4a4e403ba40436a"} Jan 05 22:45:02 crc kubenswrapper[4910]: I0105 22:45:02.831143 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" Jan 05 22:45:02 crc kubenswrapper[4910]: I0105 22:45:02.941352 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5da3b211-d6bb-4b8b-8918-21d631902c74-config-volume\") pod \"5da3b211-d6bb-4b8b-8918-21d631902c74\" (UID: \"5da3b211-d6bb-4b8b-8918-21d631902c74\") " Jan 05 22:45:02 crc kubenswrapper[4910]: I0105 22:45:02.941442 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vthqq\" (UniqueName: \"kubernetes.io/projected/5da3b211-d6bb-4b8b-8918-21d631902c74-kube-api-access-vthqq\") pod \"5da3b211-d6bb-4b8b-8918-21d631902c74\" (UID: \"5da3b211-d6bb-4b8b-8918-21d631902c74\") " Jan 05 22:45:02 crc kubenswrapper[4910]: I0105 22:45:02.941484 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5da3b211-d6bb-4b8b-8918-21d631902c74-secret-volume\") pod \"5da3b211-d6bb-4b8b-8918-21d631902c74\" (UID: \"5da3b211-d6bb-4b8b-8918-21d631902c74\") " Jan 05 22:45:02 crc kubenswrapper[4910]: I0105 22:45:02.942797 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5da3b211-d6bb-4b8b-8918-21d631902c74-config-volume" (OuterVolumeSpecName: "config-volume") pod "5da3b211-d6bb-4b8b-8918-21d631902c74" (UID: "5da3b211-d6bb-4b8b-8918-21d631902c74"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 22:45:02 crc kubenswrapper[4910]: I0105 22:45:02.947328 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5da3b211-d6bb-4b8b-8918-21d631902c74-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5da3b211-d6bb-4b8b-8918-21d631902c74" (UID: "5da3b211-d6bb-4b8b-8918-21d631902c74"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 22:45:02 crc kubenswrapper[4910]: I0105 22:45:02.947606 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5da3b211-d6bb-4b8b-8918-21d631902c74-kube-api-access-vthqq" (OuterVolumeSpecName: "kube-api-access-vthqq") pod "5da3b211-d6bb-4b8b-8918-21d631902c74" (UID: "5da3b211-d6bb-4b8b-8918-21d631902c74"). InnerVolumeSpecName "kube-api-access-vthqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:45:03 crc kubenswrapper[4910]: I0105 22:45:03.042869 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5da3b211-d6bb-4b8b-8918-21d631902c74-config-volume\") on node \"crc\" DevicePath \"\"" Jan 05 22:45:03 crc kubenswrapper[4910]: I0105 22:45:03.042916 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vthqq\" (UniqueName: \"kubernetes.io/projected/5da3b211-d6bb-4b8b-8918-21d631902c74-kube-api-access-vthqq\") on node \"crc\" DevicePath \"\"" Jan 05 22:45:03 crc kubenswrapper[4910]: I0105 22:45:03.042939 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5da3b211-d6bb-4b8b-8918-21d631902c74-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 05 22:45:03 crc kubenswrapper[4910]: I0105 22:45:03.498279 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" event={"ID":"5da3b211-d6bb-4b8b-8918-21d631902c74","Type":"ContainerDied","Data":"0817ec1505e6fec7769ce7225c58469480ef8a85d07d2cb3a4a4e403ba40436a"} Jan 05 22:45:03 crc kubenswrapper[4910]: I0105 22:45:03.498335 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s" Jan 05 22:45:03 crc kubenswrapper[4910]: I0105 22:45:03.498342 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0817ec1505e6fec7769ce7225c58469480ef8a85d07d2cb3a4a4e403ba40436a" Jan 05 22:45:03 crc kubenswrapper[4910]: I0105 22:45:03.937783 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq"] Jan 05 22:45:03 crc kubenswrapper[4910]: I0105 22:45:03.945677 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460840-89jfq"] Jan 05 22:45:04 crc kubenswrapper[4910]: I0105 22:45:04.740068 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe30943a-f40d-49bd-b9a8-bb0b6e1701d8" path="/var/lib/kubelet/pods/fe30943a-f40d-49bd-b9a8-bb0b6e1701d8/volumes" Jan 05 22:45:06 crc kubenswrapper[4910]: I0105 22:45:06.720918 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:45:06 crc kubenswrapper[4910]: E0105 22:45:06.721248 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:45:21 crc kubenswrapper[4910]: I0105 22:45:21.721084 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:45:21 crc kubenswrapper[4910]: E0105 22:45:21.721890 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:45:35 crc kubenswrapper[4910]: I0105 22:45:35.721564 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:45:35 crc kubenswrapper[4910]: E0105 22:45:35.722615 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:45:43 crc kubenswrapper[4910]: I0105 22:45:43.282587 4910 scope.go:117] "RemoveContainer" containerID="11fde922fb9628da7da35571440344c7219779235ab7eabad9522924ee3f8703" Jan 05 22:45:48 crc kubenswrapper[4910]: I0105 22:45:48.728387 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:45:48 crc kubenswrapper[4910]: E0105 22:45:48.729461 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:46:00 crc kubenswrapper[4910]: I0105 22:46:00.722102 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:46:00 crc kubenswrapper[4910]: E0105 22:46:00.723393 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:46:11 crc kubenswrapper[4910]: I0105 22:46:11.721676 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:46:11 crc kubenswrapper[4910]: E0105 22:46:11.722561 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:46:23 crc kubenswrapper[4910]: I0105 22:46:23.722705 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:46:23 crc kubenswrapper[4910]: E0105 22:46:23.724173 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:46:25 crc kubenswrapper[4910]: I0105 22:46:25.789529 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rc5ph"] Jan 05 22:46:25 crc kubenswrapper[4910]: E0105 22:46:25.790058 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5da3b211-d6bb-4b8b-8918-21d631902c74" containerName="collect-profiles" Jan 05 22:46:25 crc kubenswrapper[4910]: I0105 22:46:25.790082 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5da3b211-d6bb-4b8b-8918-21d631902c74" containerName="collect-profiles" Jan 05 22:46:25 crc kubenswrapper[4910]: I0105 22:46:25.790601 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5da3b211-d6bb-4b8b-8918-21d631902c74" containerName="collect-profiles" Jan 05 22:46:25 crc kubenswrapper[4910]: I0105 22:46:25.792483 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:25 crc kubenswrapper[4910]: I0105 22:46:25.846459 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rc5ph"] Jan 05 22:46:25 crc kubenswrapper[4910]: I0105 22:46:25.908338 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d3cb396-60d6-4ee6-8821-d181227e51ff-utilities\") pod \"redhat-marketplace-rc5ph\" (UID: \"0d3cb396-60d6-4ee6-8821-d181227e51ff\") " pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:25 crc kubenswrapper[4910]: I0105 22:46:25.908657 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d3cb396-60d6-4ee6-8821-d181227e51ff-catalog-content\") pod \"redhat-marketplace-rc5ph\" (UID: \"0d3cb396-60d6-4ee6-8821-d181227e51ff\") " pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:25 crc kubenswrapper[4910]: I0105 22:46:25.908677 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbfbd\" (UniqueName: \"kubernetes.io/projected/0d3cb396-60d6-4ee6-8821-d181227e51ff-kube-api-access-hbfbd\") pod \"redhat-marketplace-rc5ph\" (UID: \"0d3cb396-60d6-4ee6-8821-d181227e51ff\") " pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:26 crc kubenswrapper[4910]: I0105 22:46:26.010344 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d3cb396-60d6-4ee6-8821-d181227e51ff-utilities\") pod \"redhat-marketplace-rc5ph\" (UID: \"0d3cb396-60d6-4ee6-8821-d181227e51ff\") " pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:26 crc kubenswrapper[4910]: I0105 22:46:26.010493 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d3cb396-60d6-4ee6-8821-d181227e51ff-catalog-content\") pod \"redhat-marketplace-rc5ph\" (UID: \"0d3cb396-60d6-4ee6-8821-d181227e51ff\") " pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:26 crc kubenswrapper[4910]: I0105 22:46:26.010519 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbfbd\" (UniqueName: \"kubernetes.io/projected/0d3cb396-60d6-4ee6-8821-d181227e51ff-kube-api-access-hbfbd\") pod \"redhat-marketplace-rc5ph\" (UID: \"0d3cb396-60d6-4ee6-8821-d181227e51ff\") " pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:26 crc kubenswrapper[4910]: I0105 22:46:26.010824 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d3cb396-60d6-4ee6-8821-d181227e51ff-utilities\") pod \"redhat-marketplace-rc5ph\" (UID: \"0d3cb396-60d6-4ee6-8821-d181227e51ff\") " pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:26 crc kubenswrapper[4910]: I0105 22:46:26.011068 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d3cb396-60d6-4ee6-8821-d181227e51ff-catalog-content\") pod \"redhat-marketplace-rc5ph\" (UID: \"0d3cb396-60d6-4ee6-8821-d181227e51ff\") " pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:26 crc kubenswrapper[4910]: I0105 22:46:26.040718 4910 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-hbfbd\" (UniqueName: \"kubernetes.io/projected/0d3cb396-60d6-4ee6-8821-d181227e51ff-kube-api-access-hbfbd\") pod \"redhat-marketplace-rc5ph\" (UID: \"0d3cb396-60d6-4ee6-8821-d181227e51ff\") " pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:26 crc kubenswrapper[4910]: I0105 22:46:26.156191 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:26 crc kubenswrapper[4910]: I0105 22:46:26.473526 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rc5ph"] Jan 05 22:46:26 crc kubenswrapper[4910]: W0105 22:46:26.482798 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d3cb396_60d6_4ee6_8821_d181227e51ff.slice/crio-2472ae4d3560c9539f7edce54a2c13124e0ba72189250631c289b5d4b96b8f1f WatchSource:0}: Error finding container 2472ae4d3560c9539f7edce54a2c13124e0ba72189250631c289b5d4b96b8f1f: Status 404 returned error can't find the container with id 2472ae4d3560c9539f7edce54a2c13124e0ba72189250631c289b5d4b96b8f1f Jan 05 22:46:27 crc kubenswrapper[4910]: I0105 22:46:27.241708 4910 generic.go:334] "Generic (PLEG): container finished" podID="0d3cb396-60d6-4ee6-8821-d181227e51ff" containerID="d1f3e33a1f00123182f81324c6e70a62ee33cf37626732c0e0fa0c03471f0cd3" exitCode=0 Jan 05 22:46:27 crc kubenswrapper[4910]: I0105 22:46:27.241854 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rc5ph" event={"ID":"0d3cb396-60d6-4ee6-8821-d181227e51ff","Type":"ContainerDied","Data":"d1f3e33a1f00123182f81324c6e70a62ee33cf37626732c0e0fa0c03471f0cd3"} Jan 05 22:46:27 crc kubenswrapper[4910]: I0105 22:46:27.242149 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rc5ph" event={"ID":"0d3cb396-60d6-4ee6-8821-d181227e51ff","Type":"ContainerStarted","Data":"2472ae4d3560c9539f7edce54a2c13124e0ba72189250631c289b5d4b96b8f1f"} Jan 05 22:46:27 crc kubenswrapper[4910]: I0105 22:46:27.244981 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 22:46:29 crc kubenswrapper[4910]: I0105 22:46:29.278430 4910 generic.go:334] "Generic (PLEG): container finished" podID="0d3cb396-60d6-4ee6-8821-d181227e51ff" containerID="c2780f699f5781f31aa75806f1d9534dc6fe3a40df18dae4d8023d4370f0d5d8" exitCode=0 Jan 05 22:46:29 crc kubenswrapper[4910]: I0105 22:46:29.278561 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rc5ph" event={"ID":"0d3cb396-60d6-4ee6-8821-d181227e51ff","Type":"ContainerDied","Data":"c2780f699f5781f31aa75806f1d9534dc6fe3a40df18dae4d8023d4370f0d5d8"} Jan 05 22:46:30 crc kubenswrapper[4910]: I0105 22:46:30.296305 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rc5ph" event={"ID":"0d3cb396-60d6-4ee6-8821-d181227e51ff","Type":"ContainerStarted","Data":"60811e7188bae638aa26b7e024df283608039c4d3cb0046549205dccb7246e86"} Jan 05 22:46:30 crc kubenswrapper[4910]: I0105 22:46:30.329983 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rc5ph" podStartSLOduration=2.88712208 podStartE2EDuration="5.329942081s" podCreationTimestamp="2026-01-05 22:46:25 +0000 UTC" firstStartedPulling="2026-01-05 22:46:27.244660587 +0000 UTC m=+3318.822158267" 
lastFinishedPulling="2026-01-05 22:46:29.687480558 +0000 UTC m=+3321.264978268" observedRunningTime="2026-01-05 22:46:30.319356606 +0000 UTC m=+3321.896854296" watchObservedRunningTime="2026-01-05 22:46:30.329942081 +0000 UTC m=+3321.907439791" Jan 05 22:46:36 crc kubenswrapper[4910]: I0105 22:46:36.157567 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:36 crc kubenswrapper[4910]: I0105 22:46:36.158262 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:36 crc kubenswrapper[4910]: I0105 22:46:36.209604 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:36 crc kubenswrapper[4910]: I0105 22:46:36.414284 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:36 crc kubenswrapper[4910]: I0105 22:46:36.511684 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rc5ph"] Jan 05 22:46:37 crc kubenswrapper[4910]: I0105 22:46:37.721495 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:46:37 crc kubenswrapper[4910]: E0105 22:46:37.722251 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:46:38 crc kubenswrapper[4910]: I0105 22:46:38.382023 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rc5ph" podUID="0d3cb396-60d6-4ee6-8821-d181227e51ff" containerName="registry-server" containerID="cri-o://60811e7188bae638aa26b7e024df283608039c4d3cb0046549205dccb7246e86" gracePeriod=2 Jan 05 22:46:38 crc kubenswrapper[4910]: I0105 22:46:38.870183 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ltdm5"] Jan 05 22:46:38 crc kubenswrapper[4910]: I0105 22:46:38.874101 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:38 crc kubenswrapper[4910]: I0105 22:46:38.884599 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ltdm5"] Jan 05 22:46:38 crc kubenswrapper[4910]: I0105 22:46:38.963523 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/852869f8-c7b6-4134-843f-0173570fb56c-utilities\") pod \"redhat-operators-ltdm5\" (UID: \"852869f8-c7b6-4134-843f-0173570fb56c\") " pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:38 crc kubenswrapper[4910]: I0105 22:46:38.963703 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/852869f8-c7b6-4134-843f-0173570fb56c-catalog-content\") pod \"redhat-operators-ltdm5\" (UID: \"852869f8-c7b6-4134-843f-0173570fb56c\") " pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:38 crc kubenswrapper[4910]: I0105 22:46:38.963775 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqqf8\" (UniqueName: \"kubernetes.io/projected/852869f8-c7b6-4134-843f-0173570fb56c-kube-api-access-xqqf8\") pod \"redhat-operators-ltdm5\" (UID: \"852869f8-c7b6-4134-843f-0173570fb56c\") " pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.065951 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/852869f8-c7b6-4134-843f-0173570fb56c-utilities\") pod \"redhat-operators-ltdm5\" (UID: \"852869f8-c7b6-4134-843f-0173570fb56c\") " pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.066104 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/852869f8-c7b6-4134-843f-0173570fb56c-catalog-content\") pod \"redhat-operators-ltdm5\" (UID: \"852869f8-c7b6-4134-843f-0173570fb56c\") " pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.066195 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqqf8\" (UniqueName: \"kubernetes.io/projected/852869f8-c7b6-4134-843f-0173570fb56c-kube-api-access-xqqf8\") pod \"redhat-operators-ltdm5\" (UID: \"852869f8-c7b6-4134-843f-0173570fb56c\") " pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.066679 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/852869f8-c7b6-4134-843f-0173570fb56c-utilities\") pod \"redhat-operators-ltdm5\" (UID: \"852869f8-c7b6-4134-843f-0173570fb56c\") " pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.066872 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/852869f8-c7b6-4134-843f-0173570fb56c-catalog-content\") pod \"redhat-operators-ltdm5\" (UID: \"852869f8-c7b6-4134-843f-0173570fb56c\") " pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.096215 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xqqf8\" (UniqueName: \"kubernetes.io/projected/852869f8-c7b6-4134-843f-0173570fb56c-kube-api-access-xqqf8\") pod \"redhat-operators-ltdm5\" (UID: \"852869f8-c7b6-4134-843f-0173570fb56c\") " pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.201058 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.387591 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.399183 4910 generic.go:334] "Generic (PLEG): container finished" podID="0d3cb396-60d6-4ee6-8821-d181227e51ff" containerID="60811e7188bae638aa26b7e024df283608039c4d3cb0046549205dccb7246e86" exitCode=0 Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.399243 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rc5ph" event={"ID":"0d3cb396-60d6-4ee6-8821-d181227e51ff","Type":"ContainerDied","Data":"60811e7188bae638aa26b7e024df283608039c4d3cb0046549205dccb7246e86"} Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.399288 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rc5ph" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.399328 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rc5ph" event={"ID":"0d3cb396-60d6-4ee6-8821-d181227e51ff","Type":"ContainerDied","Data":"2472ae4d3560c9539f7edce54a2c13124e0ba72189250631c289b5d4b96b8f1f"} Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.399359 4910 scope.go:117] "RemoveContainer" containerID="60811e7188bae638aa26b7e024df283608039c4d3cb0046549205dccb7246e86" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.439990 4910 scope.go:117] "RemoveContainer" containerID="c2780f699f5781f31aa75806f1d9534dc6fe3a40df18dae4d8023d4370f0d5d8" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.471690 4910 scope.go:117] "RemoveContainer" containerID="d1f3e33a1f00123182f81324c6e70a62ee33cf37626732c0e0fa0c03471f0cd3" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.474762 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbfbd\" (UniqueName: \"kubernetes.io/projected/0d3cb396-60d6-4ee6-8821-d181227e51ff-kube-api-access-hbfbd\") pod \"0d3cb396-60d6-4ee6-8821-d181227e51ff\" (UID: \"0d3cb396-60d6-4ee6-8821-d181227e51ff\") " Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.474850 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d3cb396-60d6-4ee6-8821-d181227e51ff-catalog-content\") pod \"0d3cb396-60d6-4ee6-8821-d181227e51ff\" (UID: \"0d3cb396-60d6-4ee6-8821-d181227e51ff\") " Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.474882 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d3cb396-60d6-4ee6-8821-d181227e51ff-utilities\") pod \"0d3cb396-60d6-4ee6-8821-d181227e51ff\" (UID: \"0d3cb396-60d6-4ee6-8821-d181227e51ff\") " Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.481633 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/0d3cb396-60d6-4ee6-8821-d181227e51ff-utilities" (OuterVolumeSpecName: "utilities") pod "0d3cb396-60d6-4ee6-8821-d181227e51ff" (UID: "0d3cb396-60d6-4ee6-8821-d181227e51ff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.492517 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d3cb396-60d6-4ee6-8821-d181227e51ff-kube-api-access-hbfbd" (OuterVolumeSpecName: "kube-api-access-hbfbd") pod "0d3cb396-60d6-4ee6-8821-d181227e51ff" (UID: "0d3cb396-60d6-4ee6-8821-d181227e51ff"). InnerVolumeSpecName "kube-api-access-hbfbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.532749 4910 scope.go:117] "RemoveContainer" containerID="60811e7188bae638aa26b7e024df283608039c4d3cb0046549205dccb7246e86" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.533044 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d3cb396-60d6-4ee6-8821-d181227e51ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0d3cb396-60d6-4ee6-8821-d181227e51ff" (UID: "0d3cb396-60d6-4ee6-8821-d181227e51ff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:46:39 crc kubenswrapper[4910]: E0105 22:46:39.533806 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60811e7188bae638aa26b7e024df283608039c4d3cb0046549205dccb7246e86\": container with ID starting with 60811e7188bae638aa26b7e024df283608039c4d3cb0046549205dccb7246e86 not found: ID does not exist" containerID="60811e7188bae638aa26b7e024df283608039c4d3cb0046549205dccb7246e86" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.533845 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60811e7188bae638aa26b7e024df283608039c4d3cb0046549205dccb7246e86"} err="failed to get container status \"60811e7188bae638aa26b7e024df283608039c4d3cb0046549205dccb7246e86\": rpc error: code = NotFound desc = could not find container \"60811e7188bae638aa26b7e024df283608039c4d3cb0046549205dccb7246e86\": container with ID starting with 60811e7188bae638aa26b7e024df283608039c4d3cb0046549205dccb7246e86 not found: ID does not exist" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.533880 4910 scope.go:117] "RemoveContainer" containerID="c2780f699f5781f31aa75806f1d9534dc6fe3a40df18dae4d8023d4370f0d5d8" Jan 05 22:46:39 crc kubenswrapper[4910]: E0105 22:46:39.534480 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2780f699f5781f31aa75806f1d9534dc6fe3a40df18dae4d8023d4370f0d5d8\": container with ID starting with c2780f699f5781f31aa75806f1d9534dc6fe3a40df18dae4d8023d4370f0d5d8 not found: ID does not exist" containerID="c2780f699f5781f31aa75806f1d9534dc6fe3a40df18dae4d8023d4370f0d5d8" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.534502 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2780f699f5781f31aa75806f1d9534dc6fe3a40df18dae4d8023d4370f0d5d8"} err="failed to get container status \"c2780f699f5781f31aa75806f1d9534dc6fe3a40df18dae4d8023d4370f0d5d8\": rpc error: code = NotFound desc = could not find container \"c2780f699f5781f31aa75806f1d9534dc6fe3a40df18dae4d8023d4370f0d5d8\": container 
with ID starting with c2780f699f5781f31aa75806f1d9534dc6fe3a40df18dae4d8023d4370f0d5d8 not found: ID does not exist" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.534517 4910 scope.go:117] "RemoveContainer" containerID="d1f3e33a1f00123182f81324c6e70a62ee33cf37626732c0e0fa0c03471f0cd3" Jan 05 22:46:39 crc kubenswrapper[4910]: E0105 22:46:39.536096 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1f3e33a1f00123182f81324c6e70a62ee33cf37626732c0e0fa0c03471f0cd3\": container with ID starting with d1f3e33a1f00123182f81324c6e70a62ee33cf37626732c0e0fa0c03471f0cd3 not found: ID does not exist" containerID="d1f3e33a1f00123182f81324c6e70a62ee33cf37626732c0e0fa0c03471f0cd3" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.536153 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1f3e33a1f00123182f81324c6e70a62ee33cf37626732c0e0fa0c03471f0cd3"} err="failed to get container status \"d1f3e33a1f00123182f81324c6e70a62ee33cf37626732c0e0fa0c03471f0cd3\": rpc error: code = NotFound desc = could not find container \"d1f3e33a1f00123182f81324c6e70a62ee33cf37626732c0e0fa0c03471f0cd3\": container with ID starting with d1f3e33a1f00123182f81324c6e70a62ee33cf37626732c0e0fa0c03471f0cd3 not found: ID does not exist" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.577905 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbfbd\" (UniqueName: \"kubernetes.io/projected/0d3cb396-60d6-4ee6-8821-d181227e51ff-kube-api-access-hbfbd\") on node \"crc\" DevicePath \"\"" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.577975 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d3cb396-60d6-4ee6-8821-d181227e51ff-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.577992 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d3cb396-60d6-4ee6-8821-d181227e51ff-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.731692 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rc5ph"] Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.738254 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rc5ph"] Jan 05 22:46:39 crc kubenswrapper[4910]: I0105 22:46:39.746307 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ltdm5"] Jan 05 22:46:40 crc kubenswrapper[4910]: I0105 22:46:40.408293 4910 generic.go:334] "Generic (PLEG): container finished" podID="852869f8-c7b6-4134-843f-0173570fb56c" containerID="d214aab07b678f76d114acb2555e49c53b09e2bd1ca87d8032da2f0230293d02" exitCode=0 Jan 05 22:46:40 crc kubenswrapper[4910]: I0105 22:46:40.408467 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ltdm5" event={"ID":"852869f8-c7b6-4134-843f-0173570fb56c","Type":"ContainerDied","Data":"d214aab07b678f76d114acb2555e49c53b09e2bd1ca87d8032da2f0230293d02"} Jan 05 22:46:40 crc kubenswrapper[4910]: I0105 22:46:40.408763 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ltdm5" 
event={"ID":"852869f8-c7b6-4134-843f-0173570fb56c","Type":"ContainerStarted","Data":"05b179f166b34d72d6b801590b0d03c9aea645272e1fece6a93a8da56346e03d"} Jan 05 22:46:40 crc kubenswrapper[4910]: I0105 22:46:40.730505 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d3cb396-60d6-4ee6-8821-d181227e51ff" path="/var/lib/kubelet/pods/0d3cb396-60d6-4ee6-8821-d181227e51ff/volumes" Jan 05 22:46:42 crc kubenswrapper[4910]: I0105 22:46:42.428671 4910 generic.go:334] "Generic (PLEG): container finished" podID="852869f8-c7b6-4134-843f-0173570fb56c" containerID="1e861c02f6e7a3483e333b4577c42805eb1908a0624d3f967bec0d9026768545" exitCode=0 Jan 05 22:46:42 crc kubenswrapper[4910]: I0105 22:46:42.428731 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ltdm5" event={"ID":"852869f8-c7b6-4134-843f-0173570fb56c","Type":"ContainerDied","Data":"1e861c02f6e7a3483e333b4577c42805eb1908a0624d3f967bec0d9026768545"} Jan 05 22:46:43 crc kubenswrapper[4910]: I0105 22:46:43.443941 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ltdm5" event={"ID":"852869f8-c7b6-4134-843f-0173570fb56c","Type":"ContainerStarted","Data":"bc7f0712e2eec66b7e8a3fe627ab155bb23fb2ecaee8d7091c30ee83b25e955d"} Jan 05 22:46:43 crc kubenswrapper[4910]: I0105 22:46:43.468661 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ltdm5" podStartSLOduration=2.849452705 podStartE2EDuration="5.468638361s" podCreationTimestamp="2026-01-05 22:46:38 +0000 UTC" firstStartedPulling="2026-01-05 22:46:40.410705292 +0000 UTC m=+3331.988202962" lastFinishedPulling="2026-01-05 22:46:43.029890918 +0000 UTC m=+3334.607388618" observedRunningTime="2026-01-05 22:46:43.467992365 +0000 UTC m=+3335.045490055" watchObservedRunningTime="2026-01-05 22:46:43.468638361 +0000 UTC m=+3335.046136031" Jan 05 22:46:49 crc kubenswrapper[4910]: I0105 22:46:49.202171 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:49 crc kubenswrapper[4910]: I0105 22:46:49.203770 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:49 crc kubenswrapper[4910]: I0105 22:46:49.260578 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:49 crc kubenswrapper[4910]: I0105 22:46:49.559853 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:49 crc kubenswrapper[4910]: I0105 22:46:49.631642 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ltdm5"] Jan 05 22:46:51 crc kubenswrapper[4910]: I0105 22:46:51.513818 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ltdm5" podUID="852869f8-c7b6-4134-843f-0173570fb56c" containerName="registry-server" containerID="cri-o://bc7f0712e2eec66b7e8a3fe627ab155bb23fb2ecaee8d7091c30ee83b25e955d" gracePeriod=2 Jan 05 22:46:52 crc kubenswrapper[4910]: I0105 22:46:52.722211 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:46:52 crc kubenswrapper[4910]: E0105 22:46:52.722957 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:46:54 crc kubenswrapper[4910]: I0105 22:46:54.550348 4910 generic.go:334] "Generic (PLEG): container finished" podID="852869f8-c7b6-4134-843f-0173570fb56c" containerID="bc7f0712e2eec66b7e8a3fe627ab155bb23fb2ecaee8d7091c30ee83b25e955d" exitCode=0 Jan 05 22:46:54 crc kubenswrapper[4910]: I0105 22:46:54.550428 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ltdm5" event={"ID":"852869f8-c7b6-4134-843f-0173570fb56c","Type":"ContainerDied","Data":"bc7f0712e2eec66b7e8a3fe627ab155bb23fb2ecaee8d7091c30ee83b25e955d"} Jan 05 22:46:54 crc kubenswrapper[4910]: I0105 22:46:54.699645 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:54 crc kubenswrapper[4910]: I0105 22:46:54.857269 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/852869f8-c7b6-4134-843f-0173570fb56c-utilities\") pod \"852869f8-c7b6-4134-843f-0173570fb56c\" (UID: \"852869f8-c7b6-4134-843f-0173570fb56c\") " Jan 05 22:46:54 crc kubenswrapper[4910]: I0105 22:46:54.857821 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/852869f8-c7b6-4134-843f-0173570fb56c-catalog-content\") pod \"852869f8-c7b6-4134-843f-0173570fb56c\" (UID: \"852869f8-c7b6-4134-843f-0173570fb56c\") " Jan 05 22:46:54 crc kubenswrapper[4910]: I0105 22:46:54.858080 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqqf8\" (UniqueName: \"kubernetes.io/projected/852869f8-c7b6-4134-843f-0173570fb56c-kube-api-access-xqqf8\") pod \"852869f8-c7b6-4134-843f-0173570fb56c\" (UID: \"852869f8-c7b6-4134-843f-0173570fb56c\") " Jan 05 22:46:54 crc kubenswrapper[4910]: I0105 22:46:54.858388 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/852869f8-c7b6-4134-843f-0173570fb56c-utilities" (OuterVolumeSpecName: "utilities") pod "852869f8-c7b6-4134-843f-0173570fb56c" (UID: "852869f8-c7b6-4134-843f-0173570fb56c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:46:54 crc kubenswrapper[4910]: I0105 22:46:54.862106 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/852869f8-c7b6-4134-843f-0173570fb56c-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:46:54 crc kubenswrapper[4910]: I0105 22:46:54.867960 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/852869f8-c7b6-4134-843f-0173570fb56c-kube-api-access-xqqf8" (OuterVolumeSpecName: "kube-api-access-xqqf8") pod "852869f8-c7b6-4134-843f-0173570fb56c" (UID: "852869f8-c7b6-4134-843f-0173570fb56c"). InnerVolumeSpecName "kube-api-access-xqqf8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:46:54 crc kubenswrapper[4910]: I0105 22:46:54.963324 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqqf8\" (UniqueName: \"kubernetes.io/projected/852869f8-c7b6-4134-843f-0173570fb56c-kube-api-access-xqqf8\") on node \"crc\" DevicePath \"\"" Jan 05 22:46:55 crc kubenswrapper[4910]: I0105 22:46:55.026142 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/852869f8-c7b6-4134-843f-0173570fb56c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "852869f8-c7b6-4134-843f-0173570fb56c" (UID: "852869f8-c7b6-4134-843f-0173570fb56c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:46:55 crc kubenswrapper[4910]: I0105 22:46:55.064401 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/852869f8-c7b6-4134-843f-0173570fb56c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:46:55 crc kubenswrapper[4910]: I0105 22:46:55.563418 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ltdm5" event={"ID":"852869f8-c7b6-4134-843f-0173570fb56c","Type":"ContainerDied","Data":"05b179f166b34d72d6b801590b0d03c9aea645272e1fece6a93a8da56346e03d"} Jan 05 22:46:55 crc kubenswrapper[4910]: I0105 22:46:55.563557 4910 scope.go:117] "RemoveContainer" containerID="bc7f0712e2eec66b7e8a3fe627ab155bb23fb2ecaee8d7091c30ee83b25e955d" Jan 05 22:46:55 crc kubenswrapper[4910]: I0105 22:46:55.563584 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ltdm5" Jan 05 22:46:55 crc kubenswrapper[4910]: I0105 22:46:55.605492 4910 scope.go:117] "RemoveContainer" containerID="1e861c02f6e7a3483e333b4577c42805eb1908a0624d3f967bec0d9026768545" Jan 05 22:46:55 crc kubenswrapper[4910]: I0105 22:46:55.627097 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ltdm5"] Jan 05 22:46:55 crc kubenswrapper[4910]: I0105 22:46:55.635657 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ltdm5"] Jan 05 22:46:55 crc kubenswrapper[4910]: I0105 22:46:55.644719 4910 scope.go:117] "RemoveContainer" containerID="d214aab07b678f76d114acb2555e49c53b09e2bd1ca87d8032da2f0230293d02" Jan 05 22:46:56 crc kubenswrapper[4910]: I0105 22:46:56.734423 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="852869f8-c7b6-4134-843f-0173570fb56c" path="/var/lib/kubelet/pods/852869f8-c7b6-4134-843f-0173570fb56c/volumes" Jan 05 22:47:04 crc kubenswrapper[4910]: I0105 22:47:04.721567 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:47:04 crc kubenswrapper[4910]: E0105 22:47:04.722626 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:47:19 crc kubenswrapper[4910]: I0105 22:47:19.722190 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 
22:47:19 crc kubenswrapper[4910]: E0105 22:47:19.723107 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:47:30 crc kubenswrapper[4910]: I0105 22:47:30.722188 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:47:30 crc kubenswrapper[4910]: E0105 22:47:30.723020 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:47:41 crc kubenswrapper[4910]: I0105 22:47:41.722835 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:47:41 crc kubenswrapper[4910]: E0105 22:47:41.723938 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:47:55 crc kubenswrapper[4910]: I0105 22:47:55.723371 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:47:55 crc kubenswrapper[4910]: E0105 22:47:55.725926 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:48:07 crc kubenswrapper[4910]: I0105 22:48:07.721894 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:48:07 crc kubenswrapper[4910]: E0105 22:48:07.723031 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:48:22 crc kubenswrapper[4910]: I0105 22:48:22.722825 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:48:23 crc kubenswrapper[4910]: I0105 22:48:23.377006 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" 
event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"75a83893a0a58c63c935f2690fc7b4f035b95f66633a996666103bb9098b3a4f"} Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.807195 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6ztk6"] Jan 05 22:49:22 crc kubenswrapper[4910]: E0105 22:49:22.808469 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d3cb396-60d6-4ee6-8821-d181227e51ff" containerName="extract-content" Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.808491 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d3cb396-60d6-4ee6-8821-d181227e51ff" containerName="extract-content" Jan 05 22:49:22 crc kubenswrapper[4910]: E0105 22:49:22.808512 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d3cb396-60d6-4ee6-8821-d181227e51ff" containerName="registry-server" Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.808522 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d3cb396-60d6-4ee6-8821-d181227e51ff" containerName="registry-server" Jan 05 22:49:22 crc kubenswrapper[4910]: E0105 22:49:22.808540 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="852869f8-c7b6-4134-843f-0173570fb56c" containerName="extract-content" Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.808550 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="852869f8-c7b6-4134-843f-0173570fb56c" containerName="extract-content" Jan 05 22:49:22 crc kubenswrapper[4910]: E0105 22:49:22.808564 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="852869f8-c7b6-4134-843f-0173570fb56c" containerName="extract-utilities" Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.808574 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="852869f8-c7b6-4134-843f-0173570fb56c" containerName="extract-utilities" Jan 05 22:49:22 crc kubenswrapper[4910]: E0105 22:49:22.808597 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="852869f8-c7b6-4134-843f-0173570fb56c" containerName="registry-server" Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.808606 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="852869f8-c7b6-4134-843f-0173570fb56c" containerName="registry-server" Jan 05 22:49:22 crc kubenswrapper[4910]: E0105 22:49:22.808629 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d3cb396-60d6-4ee6-8821-d181227e51ff" containerName="extract-utilities" Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.808640 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d3cb396-60d6-4ee6-8821-d181227e51ff" containerName="extract-utilities" Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.808873 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d3cb396-60d6-4ee6-8821-d181227e51ff" containerName="registry-server" Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.808890 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="852869f8-c7b6-4134-843f-0173570fb56c" containerName="registry-server" Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.810916 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.826538 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6ztk6"] Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.938401 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8650da71-a93d-454c-8ff4-bce6f19e132d-utilities\") pod \"certified-operators-6ztk6\" (UID: \"8650da71-a93d-454c-8ff4-bce6f19e132d\") " pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.938479 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8650da71-a93d-454c-8ff4-bce6f19e132d-catalog-content\") pod \"certified-operators-6ztk6\" (UID: \"8650da71-a93d-454c-8ff4-bce6f19e132d\") " pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:22 crc kubenswrapper[4910]: I0105 22:49:22.938740 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhxgg\" (UniqueName: \"kubernetes.io/projected/8650da71-a93d-454c-8ff4-bce6f19e132d-kube-api-access-zhxgg\") pod \"certified-operators-6ztk6\" (UID: \"8650da71-a93d-454c-8ff4-bce6f19e132d\") " pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:23 crc kubenswrapper[4910]: I0105 22:49:23.040082 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8650da71-a93d-454c-8ff4-bce6f19e132d-utilities\") pod \"certified-operators-6ztk6\" (UID: \"8650da71-a93d-454c-8ff4-bce6f19e132d\") " pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:23 crc kubenswrapper[4910]: I0105 22:49:23.040147 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8650da71-a93d-454c-8ff4-bce6f19e132d-catalog-content\") pod \"certified-operators-6ztk6\" (UID: \"8650da71-a93d-454c-8ff4-bce6f19e132d\") " pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:23 crc kubenswrapper[4910]: I0105 22:49:23.040244 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhxgg\" (UniqueName: \"kubernetes.io/projected/8650da71-a93d-454c-8ff4-bce6f19e132d-kube-api-access-zhxgg\") pod \"certified-operators-6ztk6\" (UID: \"8650da71-a93d-454c-8ff4-bce6f19e132d\") " pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:23 crc kubenswrapper[4910]: I0105 22:49:23.040977 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8650da71-a93d-454c-8ff4-bce6f19e132d-utilities\") pod \"certified-operators-6ztk6\" (UID: \"8650da71-a93d-454c-8ff4-bce6f19e132d\") " pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:23 crc kubenswrapper[4910]: I0105 22:49:23.041061 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8650da71-a93d-454c-8ff4-bce6f19e132d-catalog-content\") pod \"certified-operators-6ztk6\" (UID: \"8650da71-a93d-454c-8ff4-bce6f19e132d\") " pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:23 crc kubenswrapper[4910]: I0105 22:49:23.062507 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zhxgg\" (UniqueName: \"kubernetes.io/projected/8650da71-a93d-454c-8ff4-bce6f19e132d-kube-api-access-zhxgg\") pod \"certified-operators-6ztk6\" (UID: \"8650da71-a93d-454c-8ff4-bce6f19e132d\") " pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:23 crc kubenswrapper[4910]: I0105 22:49:23.144434 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:23 crc kubenswrapper[4910]: I0105 22:49:23.671827 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6ztk6"] Jan 05 22:49:23 crc kubenswrapper[4910]: I0105 22:49:23.905273 4910 generic.go:334] "Generic (PLEG): container finished" podID="8650da71-a93d-454c-8ff4-bce6f19e132d" containerID="5a50942337f7c8c3353c1b29172bc62f9117e1982a88b6af5f71fa643d32aff3" exitCode=0 Jan 05 22:49:23 crc kubenswrapper[4910]: I0105 22:49:23.905322 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6ztk6" event={"ID":"8650da71-a93d-454c-8ff4-bce6f19e132d","Type":"ContainerDied","Data":"5a50942337f7c8c3353c1b29172bc62f9117e1982a88b6af5f71fa643d32aff3"} Jan 05 22:49:23 crc kubenswrapper[4910]: I0105 22:49:23.905349 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6ztk6" event={"ID":"8650da71-a93d-454c-8ff4-bce6f19e132d","Type":"ContainerStarted","Data":"b51ad6649fe07624a5c8c2692e40a6256362bcbf4c1128dad57f0ec956a48425"} Jan 05 22:49:24 crc kubenswrapper[4910]: I0105 22:49:24.914202 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6ztk6" event={"ID":"8650da71-a93d-454c-8ff4-bce6f19e132d","Type":"ContainerStarted","Data":"f84b57440e6f4412e7bdc607f8453b175e3a340a5c3c4032bc4b49cbb730bdaa"} Jan 05 22:49:25 crc kubenswrapper[4910]: I0105 22:49:25.929527 4910 generic.go:334] "Generic (PLEG): container finished" podID="8650da71-a93d-454c-8ff4-bce6f19e132d" containerID="f84b57440e6f4412e7bdc607f8453b175e3a340a5c3c4032bc4b49cbb730bdaa" exitCode=0 Jan 05 22:49:25 crc kubenswrapper[4910]: I0105 22:49:25.929581 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6ztk6" event={"ID":"8650da71-a93d-454c-8ff4-bce6f19e132d","Type":"ContainerDied","Data":"f84b57440e6f4412e7bdc607f8453b175e3a340a5c3c4032bc4b49cbb730bdaa"} Jan 05 22:49:26 crc kubenswrapper[4910]: I0105 22:49:26.939080 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6ztk6" event={"ID":"8650da71-a93d-454c-8ff4-bce6f19e132d","Type":"ContainerStarted","Data":"e513283c8f69b937b340282e8649f294c4b954d3caa77d771c1e52e644edc7dc"} Jan 05 22:49:26 crc kubenswrapper[4910]: I0105 22:49:26.968101 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6ztk6" podStartSLOduration=2.511132392 podStartE2EDuration="4.968085803s" podCreationTimestamp="2026-01-05 22:49:22 +0000 UTC" firstStartedPulling="2026-01-05 22:49:23.906927444 +0000 UTC m=+3495.484425104" lastFinishedPulling="2026-01-05 22:49:26.363880845 +0000 UTC m=+3497.941378515" observedRunningTime="2026-01-05 22:49:26.964440292 +0000 UTC m=+3498.541937962" watchObservedRunningTime="2026-01-05 22:49:26.968085803 +0000 UTC m=+3498.545583473" Jan 05 22:49:33 crc kubenswrapper[4910]: I0105 22:49:33.145109 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:33 crc kubenswrapper[4910]: I0105 22:49:33.145723 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:33 crc kubenswrapper[4910]: I0105 22:49:33.209755 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:34 crc kubenswrapper[4910]: I0105 22:49:34.047395 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:34 crc kubenswrapper[4910]: I0105 22:49:34.101002 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6ztk6"] Jan 05 22:49:36 crc kubenswrapper[4910]: I0105 22:49:36.003567 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6ztk6" podUID="8650da71-a93d-454c-8ff4-bce6f19e132d" containerName="registry-server" containerID="cri-o://e513283c8f69b937b340282e8649f294c4b954d3caa77d771c1e52e644edc7dc" gracePeriod=2 Jan 05 22:49:36 crc kubenswrapper[4910]: I0105 22:49:36.911633 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.011390 4910 generic.go:334] "Generic (PLEG): container finished" podID="8650da71-a93d-454c-8ff4-bce6f19e132d" containerID="e513283c8f69b937b340282e8649f294c4b954d3caa77d771c1e52e644edc7dc" exitCode=0 Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.011449 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6ztk6" event={"ID":"8650da71-a93d-454c-8ff4-bce6f19e132d","Type":"ContainerDied","Data":"e513283c8f69b937b340282e8649f294c4b954d3caa77d771c1e52e644edc7dc"} Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.011474 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6ztk6" event={"ID":"8650da71-a93d-454c-8ff4-bce6f19e132d","Type":"ContainerDied","Data":"b51ad6649fe07624a5c8c2692e40a6256362bcbf4c1128dad57f0ec956a48425"} Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.011500 4910 scope.go:117] "RemoveContainer" containerID="e513283c8f69b937b340282e8649f294c4b954d3caa77d771c1e52e644edc7dc" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.011614 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6ztk6" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.022465 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8650da71-a93d-454c-8ff4-bce6f19e132d-utilities\") pod \"8650da71-a93d-454c-8ff4-bce6f19e132d\" (UID: \"8650da71-a93d-454c-8ff4-bce6f19e132d\") " Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.022795 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8650da71-a93d-454c-8ff4-bce6f19e132d-catalog-content\") pod \"8650da71-a93d-454c-8ff4-bce6f19e132d\" (UID: \"8650da71-a93d-454c-8ff4-bce6f19e132d\") " Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.022835 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhxgg\" (UniqueName: \"kubernetes.io/projected/8650da71-a93d-454c-8ff4-bce6f19e132d-kube-api-access-zhxgg\") pod \"8650da71-a93d-454c-8ff4-bce6f19e132d\" (UID: \"8650da71-a93d-454c-8ff4-bce6f19e132d\") " Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.023850 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8650da71-a93d-454c-8ff4-bce6f19e132d-utilities" (OuterVolumeSpecName: "utilities") pod "8650da71-a93d-454c-8ff4-bce6f19e132d" (UID: "8650da71-a93d-454c-8ff4-bce6f19e132d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.027682 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8650da71-a93d-454c-8ff4-bce6f19e132d-kube-api-access-zhxgg" (OuterVolumeSpecName: "kube-api-access-zhxgg") pod "8650da71-a93d-454c-8ff4-bce6f19e132d" (UID: "8650da71-a93d-454c-8ff4-bce6f19e132d"). InnerVolumeSpecName "kube-api-access-zhxgg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.029350 4910 scope.go:117] "RemoveContainer" containerID="f84b57440e6f4412e7bdc607f8453b175e3a340a5c3c4032bc4b49cbb730bdaa" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.062704 4910 scope.go:117] "RemoveContainer" containerID="5a50942337f7c8c3353c1b29172bc62f9117e1982a88b6af5f71fa643d32aff3" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.078193 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8650da71-a93d-454c-8ff4-bce6f19e132d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8650da71-a93d-454c-8ff4-bce6f19e132d" (UID: "8650da71-a93d-454c-8ff4-bce6f19e132d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.092985 4910 scope.go:117] "RemoveContainer" containerID="e513283c8f69b937b340282e8649f294c4b954d3caa77d771c1e52e644edc7dc" Jan 05 22:49:37 crc kubenswrapper[4910]: E0105 22:49:37.093429 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e513283c8f69b937b340282e8649f294c4b954d3caa77d771c1e52e644edc7dc\": container with ID starting with e513283c8f69b937b340282e8649f294c4b954d3caa77d771c1e52e644edc7dc not found: ID does not exist" containerID="e513283c8f69b937b340282e8649f294c4b954d3caa77d771c1e52e644edc7dc" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.093470 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e513283c8f69b937b340282e8649f294c4b954d3caa77d771c1e52e644edc7dc"} err="failed to get container status \"e513283c8f69b937b340282e8649f294c4b954d3caa77d771c1e52e644edc7dc\": rpc error: code = NotFound desc = could not find container \"e513283c8f69b937b340282e8649f294c4b954d3caa77d771c1e52e644edc7dc\": container with ID starting with e513283c8f69b937b340282e8649f294c4b954d3caa77d771c1e52e644edc7dc not found: ID does not exist" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.093497 4910 scope.go:117] "RemoveContainer" containerID="f84b57440e6f4412e7bdc607f8453b175e3a340a5c3c4032bc4b49cbb730bdaa" Jan 05 22:49:37 crc kubenswrapper[4910]: E0105 22:49:37.093817 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f84b57440e6f4412e7bdc607f8453b175e3a340a5c3c4032bc4b49cbb730bdaa\": container with ID starting with f84b57440e6f4412e7bdc607f8453b175e3a340a5c3c4032bc4b49cbb730bdaa not found: ID does not exist" containerID="f84b57440e6f4412e7bdc607f8453b175e3a340a5c3c4032bc4b49cbb730bdaa" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.093875 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f84b57440e6f4412e7bdc607f8453b175e3a340a5c3c4032bc4b49cbb730bdaa"} err="failed to get container status \"f84b57440e6f4412e7bdc607f8453b175e3a340a5c3c4032bc4b49cbb730bdaa\": rpc error: code = NotFound desc = could not find container \"f84b57440e6f4412e7bdc607f8453b175e3a340a5c3c4032bc4b49cbb730bdaa\": container with ID starting with f84b57440e6f4412e7bdc607f8453b175e3a340a5c3c4032bc4b49cbb730bdaa not found: ID does not exist" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.093898 4910 scope.go:117] "RemoveContainer" containerID="5a50942337f7c8c3353c1b29172bc62f9117e1982a88b6af5f71fa643d32aff3" Jan 05 22:49:37 crc kubenswrapper[4910]: E0105 22:49:37.094144 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a50942337f7c8c3353c1b29172bc62f9117e1982a88b6af5f71fa643d32aff3\": container with ID starting with 5a50942337f7c8c3353c1b29172bc62f9117e1982a88b6af5f71fa643d32aff3 not found: ID does not exist" containerID="5a50942337f7c8c3353c1b29172bc62f9117e1982a88b6af5f71fa643d32aff3" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.094170 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a50942337f7c8c3353c1b29172bc62f9117e1982a88b6af5f71fa643d32aff3"} err="failed to get container status \"5a50942337f7c8c3353c1b29172bc62f9117e1982a88b6af5f71fa643d32aff3\": rpc error: code = NotFound desc = could not 
find container \"5a50942337f7c8c3353c1b29172bc62f9117e1982a88b6af5f71fa643d32aff3\": container with ID starting with 5a50942337f7c8c3353c1b29172bc62f9117e1982a88b6af5f71fa643d32aff3 not found: ID does not exist" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.124417 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8650da71-a93d-454c-8ff4-bce6f19e132d-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.124451 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8650da71-a93d-454c-8ff4-bce6f19e132d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.124462 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhxgg\" (UniqueName: \"kubernetes.io/projected/8650da71-a93d-454c-8ff4-bce6f19e132d-kube-api-access-zhxgg\") on node \"crc\" DevicePath \"\"" Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.346470 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6ztk6"] Jan 05 22:49:37 crc kubenswrapper[4910]: I0105 22:49:37.351738 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6ztk6"] Jan 05 22:49:38 crc kubenswrapper[4910]: I0105 22:49:38.748235 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8650da71-a93d-454c-8ff4-bce6f19e132d" path="/var/lib/kubelet/pods/8650da71-a93d-454c-8ff4-bce6f19e132d/volumes" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.534519 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-t8ws4"] Jan 05 22:49:52 crc kubenswrapper[4910]: E0105 22:49:52.535538 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8650da71-a93d-454c-8ff4-bce6f19e132d" containerName="registry-server" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.535557 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8650da71-a93d-454c-8ff4-bce6f19e132d" containerName="registry-server" Jan 05 22:49:52 crc kubenswrapper[4910]: E0105 22:49:52.535571 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8650da71-a93d-454c-8ff4-bce6f19e132d" containerName="extract-utilities" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.535580 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8650da71-a93d-454c-8ff4-bce6f19e132d" containerName="extract-utilities" Jan 05 22:49:52 crc kubenswrapper[4910]: E0105 22:49:52.535611 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8650da71-a93d-454c-8ff4-bce6f19e132d" containerName="extract-content" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.535620 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8650da71-a93d-454c-8ff4-bce6f19e132d" containerName="extract-content" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.535778 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8650da71-a93d-454c-8ff4-bce6f19e132d" containerName="registry-server" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.537139 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.564340 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t8ws4"] Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.652892 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fd6a0a6-ec06-4c35-909f-4e647841f975-utilities\") pod \"community-operators-t8ws4\" (UID: \"6fd6a0a6-ec06-4c35-909f-4e647841f975\") " pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.653075 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksqbg\" (UniqueName: \"kubernetes.io/projected/6fd6a0a6-ec06-4c35-909f-4e647841f975-kube-api-access-ksqbg\") pod \"community-operators-t8ws4\" (UID: \"6fd6a0a6-ec06-4c35-909f-4e647841f975\") " pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.653265 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fd6a0a6-ec06-4c35-909f-4e647841f975-catalog-content\") pod \"community-operators-t8ws4\" (UID: \"6fd6a0a6-ec06-4c35-909f-4e647841f975\") " pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.755226 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fd6a0a6-ec06-4c35-909f-4e647841f975-utilities\") pod \"community-operators-t8ws4\" (UID: \"6fd6a0a6-ec06-4c35-909f-4e647841f975\") " pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.755639 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fd6a0a6-ec06-4c35-909f-4e647841f975-utilities\") pod \"community-operators-t8ws4\" (UID: \"6fd6a0a6-ec06-4c35-909f-4e647841f975\") " pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.755735 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksqbg\" (UniqueName: \"kubernetes.io/projected/6fd6a0a6-ec06-4c35-909f-4e647841f975-kube-api-access-ksqbg\") pod \"community-operators-t8ws4\" (UID: \"6fd6a0a6-ec06-4c35-909f-4e647841f975\") " pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.755870 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fd6a0a6-ec06-4c35-909f-4e647841f975-catalog-content\") pod \"community-operators-t8ws4\" (UID: \"6fd6a0a6-ec06-4c35-909f-4e647841f975\") " pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.756337 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fd6a0a6-ec06-4c35-909f-4e647841f975-catalog-content\") pod \"community-operators-t8ws4\" (UID: \"6fd6a0a6-ec06-4c35-909f-4e647841f975\") " pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.781178 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ksqbg\" (UniqueName: \"kubernetes.io/projected/6fd6a0a6-ec06-4c35-909f-4e647841f975-kube-api-access-ksqbg\") pod \"community-operators-t8ws4\" (UID: \"6fd6a0a6-ec06-4c35-909f-4e647841f975\") " pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:49:52 crc kubenswrapper[4910]: I0105 22:49:52.856278 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:49:53 crc kubenswrapper[4910]: I0105 22:49:53.153320 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t8ws4"] Jan 05 22:49:54 crc kubenswrapper[4910]: I0105 22:49:54.147432 4910 generic.go:334] "Generic (PLEG): container finished" podID="6fd6a0a6-ec06-4c35-909f-4e647841f975" containerID="34e982673e4f5875c3eefb3d03f1bb7d99183f141afe6410eaddd534e6e3e2e6" exitCode=0 Jan 05 22:49:54 crc kubenswrapper[4910]: I0105 22:49:54.147499 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8ws4" event={"ID":"6fd6a0a6-ec06-4c35-909f-4e647841f975","Type":"ContainerDied","Data":"34e982673e4f5875c3eefb3d03f1bb7d99183f141afe6410eaddd534e6e3e2e6"} Jan 05 22:49:54 crc kubenswrapper[4910]: I0105 22:49:54.147893 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8ws4" event={"ID":"6fd6a0a6-ec06-4c35-909f-4e647841f975","Type":"ContainerStarted","Data":"4cd8710ca63d86d38d44dc5e9d863b7dad0bde055ed7b693cbad45e493c75eb5"} Jan 05 22:49:56 crc kubenswrapper[4910]: I0105 22:49:56.161972 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8ws4" event={"ID":"6fd6a0a6-ec06-4c35-909f-4e647841f975","Type":"ContainerStarted","Data":"bf450bd89d917fcbbd9655f7cd26bb2fc9e404d82a6d5c2d67527839f153850e"} Jan 05 22:49:57 crc kubenswrapper[4910]: I0105 22:49:57.168417 4910 generic.go:334] "Generic (PLEG): container finished" podID="6fd6a0a6-ec06-4c35-909f-4e647841f975" containerID="bf450bd89d917fcbbd9655f7cd26bb2fc9e404d82a6d5c2d67527839f153850e" exitCode=0 Jan 05 22:49:57 crc kubenswrapper[4910]: I0105 22:49:57.168456 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8ws4" event={"ID":"6fd6a0a6-ec06-4c35-909f-4e647841f975","Type":"ContainerDied","Data":"bf450bd89d917fcbbd9655f7cd26bb2fc9e404d82a6d5c2d67527839f153850e"} Jan 05 22:49:58 crc kubenswrapper[4910]: I0105 22:49:58.186826 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8ws4" event={"ID":"6fd6a0a6-ec06-4c35-909f-4e647841f975","Type":"ContainerStarted","Data":"c9b2ed51a70537b3be48878590c7e6fc262523b74987a9f46952fa78696f872d"} Jan 05 22:49:58 crc kubenswrapper[4910]: I0105 22:49:58.206291 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-t8ws4" podStartSLOduration=2.656111976 podStartE2EDuration="6.206274464s" podCreationTimestamp="2026-01-05 22:49:52 +0000 UTC" firstStartedPulling="2026-01-05 22:49:54.150978726 +0000 UTC m=+3525.728476436" lastFinishedPulling="2026-01-05 22:49:57.701141254 +0000 UTC m=+3529.278638924" observedRunningTime="2026-01-05 22:49:58.202368557 +0000 UTC m=+3529.779866227" watchObservedRunningTime="2026-01-05 22:49:58.206274464 +0000 UTC m=+3529.783772124" Jan 05 22:50:02 crc kubenswrapper[4910]: I0105 22:50:02.857096 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:50:02 crc kubenswrapper[4910]: I0105 22:50:02.858176 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:50:02 crc kubenswrapper[4910]: I0105 22:50:02.913303 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:50:03 crc kubenswrapper[4910]: I0105 22:50:03.295091 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:50:03 crc kubenswrapper[4910]: I0105 22:50:03.374209 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t8ws4"] Jan 05 22:50:05 crc kubenswrapper[4910]: I0105 22:50:05.243911 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-t8ws4" podUID="6fd6a0a6-ec06-4c35-909f-4e647841f975" containerName="registry-server" containerID="cri-o://c9b2ed51a70537b3be48878590c7e6fc262523b74987a9f46952fa78696f872d" gracePeriod=2 Jan 05 22:50:06 crc kubenswrapper[4910]: I0105 22:50:06.254334 4910 generic.go:334] "Generic (PLEG): container finished" podID="6fd6a0a6-ec06-4c35-909f-4e647841f975" containerID="c9b2ed51a70537b3be48878590c7e6fc262523b74987a9f46952fa78696f872d" exitCode=0 Jan 05 22:50:06 crc kubenswrapper[4910]: I0105 22:50:06.255913 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8ws4" event={"ID":"6fd6a0a6-ec06-4c35-909f-4e647841f975","Type":"ContainerDied","Data":"c9b2ed51a70537b3be48878590c7e6fc262523b74987a9f46952fa78696f872d"} Jan 05 22:50:06 crc kubenswrapper[4910]: I0105 22:50:06.329841 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:50:06 crc kubenswrapper[4910]: I0105 22:50:06.402784 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ksqbg\" (UniqueName: \"kubernetes.io/projected/6fd6a0a6-ec06-4c35-909f-4e647841f975-kube-api-access-ksqbg\") pod \"6fd6a0a6-ec06-4c35-909f-4e647841f975\" (UID: \"6fd6a0a6-ec06-4c35-909f-4e647841f975\") " Jan 05 22:50:06 crc kubenswrapper[4910]: I0105 22:50:06.402920 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fd6a0a6-ec06-4c35-909f-4e647841f975-catalog-content\") pod \"6fd6a0a6-ec06-4c35-909f-4e647841f975\" (UID: \"6fd6a0a6-ec06-4c35-909f-4e647841f975\") " Jan 05 22:50:06 crc kubenswrapper[4910]: I0105 22:50:06.403102 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fd6a0a6-ec06-4c35-909f-4e647841f975-utilities\") pod \"6fd6a0a6-ec06-4c35-909f-4e647841f975\" (UID: \"6fd6a0a6-ec06-4c35-909f-4e647841f975\") " Jan 05 22:50:06 crc kubenswrapper[4910]: I0105 22:50:06.404331 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fd6a0a6-ec06-4c35-909f-4e647841f975-utilities" (OuterVolumeSpecName: "utilities") pod "6fd6a0a6-ec06-4c35-909f-4e647841f975" (UID: "6fd6a0a6-ec06-4c35-909f-4e647841f975"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:50:06 crc kubenswrapper[4910]: I0105 22:50:06.411777 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fd6a0a6-ec06-4c35-909f-4e647841f975-kube-api-access-ksqbg" (OuterVolumeSpecName: "kube-api-access-ksqbg") pod "6fd6a0a6-ec06-4c35-909f-4e647841f975" (UID: "6fd6a0a6-ec06-4c35-909f-4e647841f975"). InnerVolumeSpecName "kube-api-access-ksqbg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:50:06 crc kubenswrapper[4910]: I0105 22:50:06.472599 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fd6a0a6-ec06-4c35-909f-4e647841f975-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6fd6a0a6-ec06-4c35-909f-4e647841f975" (UID: "6fd6a0a6-ec06-4c35-909f-4e647841f975"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:50:06 crc kubenswrapper[4910]: I0105 22:50:06.505798 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fd6a0a6-ec06-4c35-909f-4e647841f975-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:50:06 crc kubenswrapper[4910]: I0105 22:50:06.505841 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ksqbg\" (UniqueName: \"kubernetes.io/projected/6fd6a0a6-ec06-4c35-909f-4e647841f975-kube-api-access-ksqbg\") on node \"crc\" DevicePath \"\"" Jan 05 22:50:06 crc kubenswrapper[4910]: I0105 22:50:06.505856 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fd6a0a6-ec06-4c35-909f-4e647841f975-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:50:07 crc kubenswrapper[4910]: I0105 22:50:07.278489 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8ws4" event={"ID":"6fd6a0a6-ec06-4c35-909f-4e647841f975","Type":"ContainerDied","Data":"4cd8710ca63d86d38d44dc5e9d863b7dad0bde055ed7b693cbad45e493c75eb5"} Jan 05 22:50:07 crc kubenswrapper[4910]: I0105 22:50:07.278575 4910 scope.go:117] "RemoveContainer" containerID="c9b2ed51a70537b3be48878590c7e6fc262523b74987a9f46952fa78696f872d" Jan 05 22:50:07 crc kubenswrapper[4910]: I0105 22:50:07.278635 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t8ws4" Jan 05 22:50:07 crc kubenswrapper[4910]: I0105 22:50:07.309779 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t8ws4"] Jan 05 22:50:07 crc kubenswrapper[4910]: I0105 22:50:07.313038 4910 scope.go:117] "RemoveContainer" containerID="bf450bd89d917fcbbd9655f7cd26bb2fc9e404d82a6d5c2d67527839f153850e" Jan 05 22:50:07 crc kubenswrapper[4910]: I0105 22:50:07.316941 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-t8ws4"] Jan 05 22:50:07 crc kubenswrapper[4910]: I0105 22:50:07.338594 4910 scope.go:117] "RemoveContainer" containerID="34e982673e4f5875c3eefb3d03f1bb7d99183f141afe6410eaddd534e6e3e2e6" Jan 05 22:50:08 crc kubenswrapper[4910]: I0105 22:50:08.740747 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fd6a0a6-ec06-4c35-909f-4e647841f975" path="/var/lib/kubelet/pods/6fd6a0a6-ec06-4c35-909f-4e647841f975/volumes" Jan 05 22:50:40 crc kubenswrapper[4910]: I0105 22:50:40.952631 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:50:40 crc kubenswrapper[4910]: I0105 22:50:40.953229 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:51:10 crc kubenswrapper[4910]: I0105 22:51:10.952765 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:51:10 crc kubenswrapper[4910]: I0105 22:51:10.954231 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:51:40 crc kubenswrapper[4910]: I0105 22:51:40.952803 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:51:40 crc kubenswrapper[4910]: I0105 22:51:40.953615 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:51:40 crc kubenswrapper[4910]: I0105 22:51:40.953679 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 22:51:40 crc kubenswrapper[4910]: I0105 22:51:40.954455 4910 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"75a83893a0a58c63c935f2690fc7b4f035b95f66633a996666103bb9098b3a4f"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 22:51:40 crc kubenswrapper[4910]: I0105 22:51:40.954552 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://75a83893a0a58c63c935f2690fc7b4f035b95f66633a996666103bb9098b3a4f" gracePeriod=600 Jan 05 22:51:41 crc kubenswrapper[4910]: I0105 22:51:41.158662 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="75a83893a0a58c63c935f2690fc7b4f035b95f66633a996666103bb9098b3a4f" exitCode=0 Jan 05 22:51:41 crc kubenswrapper[4910]: I0105 22:51:41.158781 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"75a83893a0a58c63c935f2690fc7b4f035b95f66633a996666103bb9098b3a4f"} Jan 05 22:51:41 crc kubenswrapper[4910]: I0105 22:51:41.159363 4910 scope.go:117] "RemoveContainer" containerID="2d6da1b270bbb2d94be361eea2634e5ce4a6bdb9e6d52682e6c76f4580f5be0b" Jan 05 22:51:42 crc kubenswrapper[4910]: I0105 22:51:42.175990 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497"} Jan 05 22:54:10 crc kubenswrapper[4910]: I0105 22:54:10.952828 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:54:10 crc kubenswrapper[4910]: I0105 22:54:10.954514 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:54:40 crc kubenswrapper[4910]: I0105 22:54:40.952895 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:54:40 crc kubenswrapper[4910]: I0105 22:54:40.953512 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:55:10 crc kubenswrapper[4910]: I0105 22:55:10.952667 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 22:55:10 crc kubenswrapper[4910]: I0105 22:55:10.953399 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 22:55:10 crc kubenswrapper[4910]: I0105 22:55:10.953460 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 22:55:10 crc kubenswrapper[4910]: I0105 22:55:10.954248 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 22:55:10 crc kubenswrapper[4910]: I0105 22:55:10.954318 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" gracePeriod=600 Jan 05 22:55:11 crc kubenswrapper[4910]: E0105 22:55:11.812240 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:55:12 crc kubenswrapper[4910]: I0105 22:55:12.215858 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" exitCode=0 Jan 05 22:55:12 crc kubenswrapper[4910]: I0105 22:55:12.215912 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497"} Jan 05 22:55:12 crc kubenswrapper[4910]: I0105 22:55:12.216000 4910 scope.go:117] "RemoveContainer" containerID="75a83893a0a58c63c935f2690fc7b4f035b95f66633a996666103bb9098b3a4f" Jan 05 22:55:12 crc kubenswrapper[4910]: I0105 22:55:12.216762 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:55:12 crc kubenswrapper[4910]: E0105 22:55:12.217472 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" 
Jan 05 22:55:26 crc kubenswrapper[4910]: I0105 22:55:26.722504 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:55:26 crc kubenswrapper[4910]: E0105 22:55:26.723637 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:55:38 crc kubenswrapper[4910]: I0105 22:55:38.730895 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:55:38 crc kubenswrapper[4910]: E0105 22:55:38.732251 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:55:53 crc kubenswrapper[4910]: I0105 22:55:53.722111 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:55:53 crc kubenswrapper[4910]: E0105 22:55:53.723341 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:56:04 crc kubenswrapper[4910]: I0105 22:56:04.722588 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:56:04 crc kubenswrapper[4910]: E0105 22:56:04.723695 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:56:18 crc kubenswrapper[4910]: I0105 22:56:18.732359 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:56:18 crc kubenswrapper[4910]: E0105 22:56:18.733634 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:56:31 crc kubenswrapper[4910]: I0105 22:56:31.721209 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:56:31 
crc kubenswrapper[4910]: E0105 22:56:31.722027 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:56:44 crc kubenswrapper[4910]: I0105 22:56:44.723012 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:56:44 crc kubenswrapper[4910]: E0105 22:56:44.724324 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:56:56 crc kubenswrapper[4910]: I0105 22:56:56.722809 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:56:56 crc kubenswrapper[4910]: E0105 22:56:56.724340 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.228700 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pcrnm"] Jan 05 22:57:03 crc kubenswrapper[4910]: E0105 22:57:03.229740 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd6a0a6-ec06-4c35-909f-4e647841f975" containerName="registry-server" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.229756 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd6a0a6-ec06-4c35-909f-4e647841f975" containerName="registry-server" Jan 05 22:57:03 crc kubenswrapper[4910]: E0105 22:57:03.229800 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd6a0a6-ec06-4c35-909f-4e647841f975" containerName="extract-utilities" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.229807 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd6a0a6-ec06-4c35-909f-4e647841f975" containerName="extract-utilities" Jan 05 22:57:03 crc kubenswrapper[4910]: E0105 22:57:03.229818 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fd6a0a6-ec06-4c35-909f-4e647841f975" containerName="extract-content" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.229825 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fd6a0a6-ec06-4c35-909f-4e647841f975" containerName="extract-content" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.229996 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fd6a0a6-ec06-4c35-909f-4e647841f975" containerName="registry-server" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.231057 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.246435 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pcrnm"] Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.335527 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkvh8\" (UniqueName: \"kubernetes.io/projected/cfe90571-da72-4efd-b249-7cbb7b0080c9-kube-api-access-pkvh8\") pod \"redhat-operators-pcrnm\" (UID: \"cfe90571-da72-4efd-b249-7cbb7b0080c9\") " pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.335583 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfe90571-da72-4efd-b249-7cbb7b0080c9-catalog-content\") pod \"redhat-operators-pcrnm\" (UID: \"cfe90571-da72-4efd-b249-7cbb7b0080c9\") " pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.335617 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfe90571-da72-4efd-b249-7cbb7b0080c9-utilities\") pod \"redhat-operators-pcrnm\" (UID: \"cfe90571-da72-4efd-b249-7cbb7b0080c9\") " pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.437171 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkvh8\" (UniqueName: \"kubernetes.io/projected/cfe90571-da72-4efd-b249-7cbb7b0080c9-kube-api-access-pkvh8\") pod \"redhat-operators-pcrnm\" (UID: \"cfe90571-da72-4efd-b249-7cbb7b0080c9\") " pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.437223 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfe90571-da72-4efd-b249-7cbb7b0080c9-catalog-content\") pod \"redhat-operators-pcrnm\" (UID: \"cfe90571-da72-4efd-b249-7cbb7b0080c9\") " pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.437266 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfe90571-da72-4efd-b249-7cbb7b0080c9-utilities\") pod \"redhat-operators-pcrnm\" (UID: \"cfe90571-da72-4efd-b249-7cbb7b0080c9\") " pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.437814 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfe90571-da72-4efd-b249-7cbb7b0080c9-utilities\") pod \"redhat-operators-pcrnm\" (UID: \"cfe90571-da72-4efd-b249-7cbb7b0080c9\") " pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.437881 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfe90571-da72-4efd-b249-7cbb7b0080c9-catalog-content\") pod \"redhat-operators-pcrnm\" (UID: \"cfe90571-da72-4efd-b249-7cbb7b0080c9\") " pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.465700 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-pkvh8\" (UniqueName: \"kubernetes.io/projected/cfe90571-da72-4efd-b249-7cbb7b0080c9-kube-api-access-pkvh8\") pod \"redhat-operators-pcrnm\" (UID: \"cfe90571-da72-4efd-b249-7cbb7b0080c9\") " pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:03 crc kubenswrapper[4910]: I0105 22:57:03.554149 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:04 crc kubenswrapper[4910]: I0105 22:57:04.051034 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pcrnm"] Jan 05 22:57:04 crc kubenswrapper[4910]: I0105 22:57:04.407495 4910 generic.go:334] "Generic (PLEG): container finished" podID="cfe90571-da72-4efd-b249-7cbb7b0080c9" containerID="dcfc010ce7a1a1ba693d649efb97e826217d84985cfb1786ae5da453d518a2ee" exitCode=0 Jan 05 22:57:04 crc kubenswrapper[4910]: I0105 22:57:04.407545 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pcrnm" event={"ID":"cfe90571-da72-4efd-b249-7cbb7b0080c9","Type":"ContainerDied","Data":"dcfc010ce7a1a1ba693d649efb97e826217d84985cfb1786ae5da453d518a2ee"} Jan 05 22:57:04 crc kubenswrapper[4910]: I0105 22:57:04.407891 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pcrnm" event={"ID":"cfe90571-da72-4efd-b249-7cbb7b0080c9","Type":"ContainerStarted","Data":"b270b8f86d85c772c5f36c865dfbf0f4a38b5f4138b219c9f24bfd41dca50d99"} Jan 05 22:57:04 crc kubenswrapper[4910]: I0105 22:57:04.409424 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 22:57:05 crc kubenswrapper[4910]: I0105 22:57:05.418889 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pcrnm" event={"ID":"cfe90571-da72-4efd-b249-7cbb7b0080c9","Type":"ContainerStarted","Data":"2b42410e66c1e1553c7368bd276079931db525c73be6c0b63339dc5e9f6e3cd3"} Jan 05 22:57:06 crc kubenswrapper[4910]: I0105 22:57:06.432100 4910 generic.go:334] "Generic (PLEG): container finished" podID="cfe90571-da72-4efd-b249-7cbb7b0080c9" containerID="2b42410e66c1e1553c7368bd276079931db525c73be6c0b63339dc5e9f6e3cd3" exitCode=0 Jan 05 22:57:06 crc kubenswrapper[4910]: I0105 22:57:06.432242 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pcrnm" event={"ID":"cfe90571-da72-4efd-b249-7cbb7b0080c9","Type":"ContainerDied","Data":"2b42410e66c1e1553c7368bd276079931db525c73be6c0b63339dc5e9f6e3cd3"} Jan 05 22:57:07 crc kubenswrapper[4910]: I0105 22:57:07.442413 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pcrnm" event={"ID":"cfe90571-da72-4efd-b249-7cbb7b0080c9","Type":"ContainerStarted","Data":"a3c987cd5e2e6d694a837f65c347b4e03475e91379f2fc439daeb4aff702efcc"} Jan 05 22:57:07 crc kubenswrapper[4910]: I0105 22:57:07.461105 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pcrnm" podStartSLOduration=1.990815729 podStartE2EDuration="4.461081762s" podCreationTimestamp="2026-01-05 22:57:03 +0000 UTC" firstStartedPulling="2026-01-05 22:57:04.409059612 +0000 UTC m=+3955.986557282" lastFinishedPulling="2026-01-05 22:57:06.879325635 +0000 UTC m=+3958.456823315" observedRunningTime="2026-01-05 22:57:07.459877302 +0000 UTC m=+3959.037374982" watchObservedRunningTime="2026-01-05 22:57:07.461081762 +0000 UTC m=+3959.038579442" Jan 05 22:57:11 crc 
kubenswrapper[4910]: I0105 22:57:11.721328 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:57:11 crc kubenswrapper[4910]: E0105 22:57:11.722066 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:57:13 crc kubenswrapper[4910]: I0105 22:57:13.554611 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:13 crc kubenswrapper[4910]: I0105 22:57:13.556299 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:13 crc kubenswrapper[4910]: I0105 22:57:13.607349 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:14 crc kubenswrapper[4910]: I0105 22:57:14.836948 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:14 crc kubenswrapper[4910]: I0105 22:57:14.889970 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pcrnm"] Jan 05 22:57:16 crc kubenswrapper[4910]: I0105 22:57:16.522568 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pcrnm" podUID="cfe90571-da72-4efd-b249-7cbb7b0080c9" containerName="registry-server" containerID="cri-o://a3c987cd5e2e6d694a837f65c347b4e03475e91379f2fc439daeb4aff702efcc" gracePeriod=2 Jan 05 22:57:18 crc kubenswrapper[4910]: I0105 22:57:18.544006 4910 generic.go:334] "Generic (PLEG): container finished" podID="cfe90571-da72-4efd-b249-7cbb7b0080c9" containerID="a3c987cd5e2e6d694a837f65c347b4e03475e91379f2fc439daeb4aff702efcc" exitCode=0 Jan 05 22:57:18 crc kubenswrapper[4910]: I0105 22:57:18.544086 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pcrnm" event={"ID":"cfe90571-da72-4efd-b249-7cbb7b0080c9","Type":"ContainerDied","Data":"a3c987cd5e2e6d694a837f65c347b4e03475e91379f2fc439daeb4aff702efcc"} Jan 05 22:57:18 crc kubenswrapper[4910]: I0105 22:57:18.733800 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:18 crc kubenswrapper[4910]: I0105 22:57:18.899195 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfe90571-da72-4efd-b249-7cbb7b0080c9-catalog-content\") pod \"cfe90571-da72-4efd-b249-7cbb7b0080c9\" (UID: \"cfe90571-da72-4efd-b249-7cbb7b0080c9\") " Jan 05 22:57:18 crc kubenswrapper[4910]: I0105 22:57:18.899428 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfe90571-da72-4efd-b249-7cbb7b0080c9-utilities\") pod \"cfe90571-da72-4efd-b249-7cbb7b0080c9\" (UID: \"cfe90571-da72-4efd-b249-7cbb7b0080c9\") " Jan 05 22:57:18 crc kubenswrapper[4910]: I0105 22:57:18.899528 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkvh8\" (UniqueName: \"kubernetes.io/projected/cfe90571-da72-4efd-b249-7cbb7b0080c9-kube-api-access-pkvh8\") pod \"cfe90571-da72-4efd-b249-7cbb7b0080c9\" (UID: \"cfe90571-da72-4efd-b249-7cbb7b0080c9\") " Jan 05 22:57:18 crc kubenswrapper[4910]: I0105 22:57:18.900373 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfe90571-da72-4efd-b249-7cbb7b0080c9-utilities" (OuterVolumeSpecName: "utilities") pod "cfe90571-da72-4efd-b249-7cbb7b0080c9" (UID: "cfe90571-da72-4efd-b249-7cbb7b0080c9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:57:18 crc kubenswrapper[4910]: I0105 22:57:18.905295 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfe90571-da72-4efd-b249-7cbb7b0080c9-kube-api-access-pkvh8" (OuterVolumeSpecName: "kube-api-access-pkvh8") pod "cfe90571-da72-4efd-b249-7cbb7b0080c9" (UID: "cfe90571-da72-4efd-b249-7cbb7b0080c9"). InnerVolumeSpecName "kube-api-access-pkvh8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 22:57:19 crc kubenswrapper[4910]: I0105 22:57:19.000989 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfe90571-da72-4efd-b249-7cbb7b0080c9-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 22:57:19 crc kubenswrapper[4910]: I0105 22:57:19.001024 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkvh8\" (UniqueName: \"kubernetes.io/projected/cfe90571-da72-4efd-b249-7cbb7b0080c9-kube-api-access-pkvh8\") on node \"crc\" DevicePath \"\"" Jan 05 22:57:19 crc kubenswrapper[4910]: I0105 22:57:19.032816 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfe90571-da72-4efd-b249-7cbb7b0080c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cfe90571-da72-4efd-b249-7cbb7b0080c9" (UID: "cfe90571-da72-4efd-b249-7cbb7b0080c9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 22:57:19 crc kubenswrapper[4910]: I0105 22:57:19.102533 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfe90571-da72-4efd-b249-7cbb7b0080c9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 22:57:19 crc kubenswrapper[4910]: I0105 22:57:19.554159 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pcrnm" event={"ID":"cfe90571-da72-4efd-b249-7cbb7b0080c9","Type":"ContainerDied","Data":"b270b8f86d85c772c5f36c865dfbf0f4a38b5f4138b219c9f24bfd41dca50d99"} Jan 05 22:57:19 crc kubenswrapper[4910]: I0105 22:57:19.554275 4910 scope.go:117] "RemoveContainer" containerID="a3c987cd5e2e6d694a837f65c347b4e03475e91379f2fc439daeb4aff702efcc" Jan 05 22:57:19 crc kubenswrapper[4910]: I0105 22:57:19.554206 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pcrnm" Jan 05 22:57:19 crc kubenswrapper[4910]: I0105 22:57:19.587314 4910 scope.go:117] "RemoveContainer" containerID="2b42410e66c1e1553c7368bd276079931db525c73be6c0b63339dc5e9f6e3cd3" Jan 05 22:57:19 crc kubenswrapper[4910]: I0105 22:57:19.596177 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pcrnm"] Jan 05 22:57:19 crc kubenswrapper[4910]: I0105 22:57:19.610239 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pcrnm"] Jan 05 22:57:19 crc kubenswrapper[4910]: I0105 22:57:19.631518 4910 scope.go:117] "RemoveContainer" containerID="dcfc010ce7a1a1ba693d649efb97e826217d84985cfb1786ae5da453d518a2ee" Jan 05 22:57:20 crc kubenswrapper[4910]: I0105 22:57:20.736737 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfe90571-da72-4efd-b249-7cbb7b0080c9" path="/var/lib/kubelet/pods/cfe90571-da72-4efd-b249-7cbb7b0080c9/volumes" Jan 05 22:57:25 crc kubenswrapper[4910]: I0105 22:57:25.722008 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:57:25 crc kubenswrapper[4910]: E0105 22:57:25.722751 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:57:38 crc kubenswrapper[4910]: I0105 22:57:38.731326 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:57:38 crc kubenswrapper[4910]: E0105 22:57:38.732879 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:57:53 crc kubenswrapper[4910]: I0105 22:57:53.722498 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:57:53 crc kubenswrapper[4910]: E0105 22:57:53.723800 
4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:58:05 crc kubenswrapper[4910]: I0105 22:58:05.722459 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:58:05 crc kubenswrapper[4910]: E0105 22:58:05.723974 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:58:20 crc kubenswrapper[4910]: I0105 22:58:20.722361 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:58:20 crc kubenswrapper[4910]: E0105 22:58:20.723295 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:58:31 crc kubenswrapper[4910]: I0105 22:58:31.722284 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:58:31 crc kubenswrapper[4910]: E0105 22:58:31.723341 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:58:45 crc kubenswrapper[4910]: I0105 22:58:45.722047 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:58:45 crc kubenswrapper[4910]: E0105 22:58:45.723202 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:59:00 crc kubenswrapper[4910]: I0105 22:59:00.722864 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:59:00 crc kubenswrapper[4910]: E0105 22:59:00.724104 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:59:13 crc kubenswrapper[4910]: I0105 22:59:13.722812 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:59:13 crc kubenswrapper[4910]: E0105 22:59:13.724641 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:59:28 crc kubenswrapper[4910]: I0105 22:59:28.722460 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:59:28 crc kubenswrapper[4910]: E0105 22:59:28.723274 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:59:39 crc kubenswrapper[4910]: I0105 22:59:39.722549 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:59:39 crc kubenswrapper[4910]: E0105 22:59:39.723412 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:59:48 crc kubenswrapper[4910]: I0105 22:59:48.833281 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tb2rf"] Jan 05 22:59:48 crc kubenswrapper[4910]: E0105 22:59:48.834071 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfe90571-da72-4efd-b249-7cbb7b0080c9" containerName="extract-content" Jan 05 22:59:48 crc kubenswrapper[4910]: I0105 22:59:48.834085 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfe90571-da72-4efd-b249-7cbb7b0080c9" containerName="extract-content" Jan 05 22:59:48 crc kubenswrapper[4910]: E0105 22:59:48.834137 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfe90571-da72-4efd-b249-7cbb7b0080c9" containerName="extract-utilities" Jan 05 22:59:48 crc kubenswrapper[4910]: I0105 22:59:48.834144 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfe90571-da72-4efd-b249-7cbb7b0080c9" containerName="extract-utilities" Jan 05 22:59:48 crc kubenswrapper[4910]: E0105 22:59:48.834154 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfe90571-da72-4efd-b249-7cbb7b0080c9" containerName="registry-server" Jan 05 22:59:48 crc kubenswrapper[4910]: I0105 22:59:48.834159 4910 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="cfe90571-da72-4efd-b249-7cbb7b0080c9" containerName="registry-server" Jan 05 22:59:48 crc kubenswrapper[4910]: I0105 22:59:48.834293 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfe90571-da72-4efd-b249-7cbb7b0080c9" containerName="registry-server" Jan 05 22:59:48 crc kubenswrapper[4910]: I0105 22:59:48.835303 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 22:59:48 crc kubenswrapper[4910]: I0105 22:59:48.868837 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tb2rf"] Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.033360 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk8nv\" (UniqueName: \"kubernetes.io/projected/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-kube-api-access-hk8nv\") pod \"certified-operators-tb2rf\" (UID: \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\") " pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.033420 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-catalog-content\") pod \"certified-operators-tb2rf\" (UID: \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\") " pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.033451 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-utilities\") pod \"certified-operators-tb2rf\" (UID: \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\") " pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.135209 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk8nv\" (UniqueName: \"kubernetes.io/projected/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-kube-api-access-hk8nv\") pod \"certified-operators-tb2rf\" (UID: \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\") " pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.135271 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-catalog-content\") pod \"certified-operators-tb2rf\" (UID: \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\") " pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.135306 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-utilities\") pod \"certified-operators-tb2rf\" (UID: \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\") " pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.135832 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-utilities\") pod \"certified-operators-tb2rf\" (UID: \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\") " pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.136002 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-catalog-content\") pod \"certified-operators-tb2rf\" (UID: \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\") " pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.154584 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk8nv\" (UniqueName: \"kubernetes.io/projected/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-kube-api-access-hk8nv\") pod \"certified-operators-tb2rf\" (UID: \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\") " pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.212610 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.494579 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tb2rf"] Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.968043 4910 generic.go:334] "Generic (PLEG): container finished" podID="d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" containerID="8eb6ea2a722f7ca11cf0da5abe731ee4a131bab06ef48015600eadb7bcb9402c" exitCode=0 Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.968117 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tb2rf" event={"ID":"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d","Type":"ContainerDied","Data":"8eb6ea2a722f7ca11cf0da5abe731ee4a131bab06ef48015600eadb7bcb9402c"} Jan 05 22:59:49 crc kubenswrapper[4910]: I0105 22:59:49.968175 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tb2rf" event={"ID":"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d","Type":"ContainerStarted","Data":"aeb15585327532836ee254c934c17106b3e1b24f13d61e07592eb8bfd49c7e31"} Jan 05 22:59:50 crc kubenswrapper[4910]: I0105 22:59:50.722366 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 22:59:50 crc kubenswrapper[4910]: E0105 22:59:50.723252 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 22:59:50 crc kubenswrapper[4910]: I0105 22:59:50.977192 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tb2rf" event={"ID":"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d","Type":"ContainerStarted","Data":"292bb9681c21dbe68545ef9a976ede5182c957eb74893df7149e0da01001f982"} Jan 05 22:59:51 crc kubenswrapper[4910]: I0105 22:59:51.991497 4910 generic.go:334] "Generic (PLEG): container finished" podID="d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" containerID="292bb9681c21dbe68545ef9a976ede5182c957eb74893df7149e0da01001f982" exitCode=0 Jan 05 22:59:51 crc kubenswrapper[4910]: I0105 22:59:51.991550 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tb2rf" 
event={"ID":"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d","Type":"ContainerDied","Data":"292bb9681c21dbe68545ef9a976ede5182c957eb74893df7149e0da01001f982"} Jan 05 22:59:53 crc kubenswrapper[4910]: I0105 22:59:53.002002 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tb2rf" event={"ID":"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d","Type":"ContainerStarted","Data":"9d1e5da4a64e61d2790049384850b6ecc24a7f8e5b7bb8890da723181feac35b"} Jan 05 22:59:53 crc kubenswrapper[4910]: I0105 22:59:53.031379 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tb2rf" podStartSLOduration=2.591829292 podStartE2EDuration="5.031346426s" podCreationTimestamp="2026-01-05 22:59:48 +0000 UTC" firstStartedPulling="2026-01-05 22:59:49.969867393 +0000 UTC m=+4121.547365063" lastFinishedPulling="2026-01-05 22:59:52.409384517 +0000 UTC m=+4123.986882197" observedRunningTime="2026-01-05 22:59:53.025695416 +0000 UTC m=+4124.603193096" watchObservedRunningTime="2026-01-05 22:59:53.031346426 +0000 UTC m=+4124.608844096" Jan 05 22:59:59 crc kubenswrapper[4910]: I0105 22:59:59.213763 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 22:59:59 crc kubenswrapper[4910]: I0105 22:59:59.214404 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 22:59:59 crc kubenswrapper[4910]: I0105 22:59:59.276479 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.137972 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.200573 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tb2rf"] Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.235381 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54"] Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.236599 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.239540 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.239935 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.250181 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54"] Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.327087 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a43b047b-40b6-4c3d-aac2-eb352229d2c2-config-volume\") pod \"collect-profiles-29460900-fhd54\" (UID: \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.327198 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a43b047b-40b6-4c3d-aac2-eb352229d2c2-secret-volume\") pod \"collect-profiles-29460900-fhd54\" (UID: \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.327266 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88bf6\" (UniqueName: \"kubernetes.io/projected/a43b047b-40b6-4c3d-aac2-eb352229d2c2-kube-api-access-88bf6\") pod \"collect-profiles-29460900-fhd54\" (UID: \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.429634 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a43b047b-40b6-4c3d-aac2-eb352229d2c2-config-volume\") pod \"collect-profiles-29460900-fhd54\" (UID: \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.429699 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a43b047b-40b6-4c3d-aac2-eb352229d2c2-secret-volume\") pod \"collect-profiles-29460900-fhd54\" (UID: \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.429732 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88bf6\" (UniqueName: \"kubernetes.io/projected/a43b047b-40b6-4c3d-aac2-eb352229d2c2-kube-api-access-88bf6\") pod \"collect-profiles-29460900-fhd54\" (UID: \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.430831 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a43b047b-40b6-4c3d-aac2-eb352229d2c2-config-volume\") pod 
\"collect-profiles-29460900-fhd54\" (UID: \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.446986 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a43b047b-40b6-4c3d-aac2-eb352229d2c2-secret-volume\") pod \"collect-profiles-29460900-fhd54\" (UID: \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.448006 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88bf6\" (UniqueName: \"kubernetes.io/projected/a43b047b-40b6-4c3d-aac2-eb352229d2c2-kube-api-access-88bf6\") pod \"collect-profiles-29460900-fhd54\" (UID: \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" Jan 05 23:00:00 crc kubenswrapper[4910]: I0105 23:00:00.557806 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" Jan 05 23:00:01 crc kubenswrapper[4910]: I0105 23:00:01.026739 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54"] Jan 05 23:00:01 crc kubenswrapper[4910]: I0105 23:00:01.075700 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" event={"ID":"a43b047b-40b6-4c3d-aac2-eb352229d2c2","Type":"ContainerStarted","Data":"9b0ab743218015c7c161483d23caf14bd1611df82cad7d8a3b7e983b38295120"} Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.090902 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" event={"ID":"a43b047b-40b6-4c3d-aac2-eb352229d2c2","Type":"ContainerStarted","Data":"e74b7dcd4b4780a90abe19f26974ed7a15f032529791505154b0e145f1d1da1a"} Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.092952 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tb2rf" podUID="d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" containerName="registry-server" containerID="cri-o://9d1e5da4a64e61d2790049384850b6ecc24a7f8e5b7bb8890da723181feac35b" gracePeriod=2 Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.110015 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" podStartSLOduration=2.109982494 podStartE2EDuration="2.109982494s" podCreationTimestamp="2026-01-05 23:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:00:02.109533023 +0000 UTC m=+4133.687030733" watchObservedRunningTime="2026-01-05 23:00:02.109982494 +0000 UTC m=+4133.687480174" Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.531927 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.667562 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-catalog-content\") pod \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\" (UID: \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\") " Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.667848 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-utilities\") pod \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\" (UID: \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\") " Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.667949 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hk8nv\" (UniqueName: \"kubernetes.io/projected/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-kube-api-access-hk8nv\") pod \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\" (UID: \"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d\") " Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.669180 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-utilities" (OuterVolumeSpecName: "utilities") pod "d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" (UID: "d7b1342f-1eb2-4a6b-a00a-45798ac4de9d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.694421 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-kube-api-access-hk8nv" (OuterVolumeSpecName: "kube-api-access-hk8nv") pod "d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" (UID: "d7b1342f-1eb2-4a6b-a00a-45798ac4de9d"). InnerVolumeSpecName "kube-api-access-hk8nv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.726817 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 23:00:02 crc kubenswrapper[4910]: E0105 23:00:02.727435 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.750087 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" (UID: "d7b1342f-1eb2-4a6b-a00a-45798ac4de9d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.770349 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.770397 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hk8nv\" (UniqueName: \"kubernetes.io/projected/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-kube-api-access-hk8nv\") on node \"crc\" DevicePath \"\"" Jan 05 23:00:02 crc kubenswrapper[4910]: I0105 23:00:02.770409 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.101151 4910 generic.go:334] "Generic (PLEG): container finished" podID="a43b047b-40b6-4c3d-aac2-eb352229d2c2" containerID="e74b7dcd4b4780a90abe19f26974ed7a15f032529791505154b0e145f1d1da1a" exitCode=0 Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.101306 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" event={"ID":"a43b047b-40b6-4c3d-aac2-eb352229d2c2","Type":"ContainerDied","Data":"e74b7dcd4b4780a90abe19f26974ed7a15f032529791505154b0e145f1d1da1a"} Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.105728 4910 generic.go:334] "Generic (PLEG): container finished" podID="d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" containerID="9d1e5da4a64e61d2790049384850b6ecc24a7f8e5b7bb8890da723181feac35b" exitCode=0 Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.105788 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tb2rf" event={"ID":"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d","Type":"ContainerDied","Data":"9d1e5da4a64e61d2790049384850b6ecc24a7f8e5b7bb8890da723181feac35b"} Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.105835 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tb2rf" event={"ID":"d7b1342f-1eb2-4a6b-a00a-45798ac4de9d","Type":"ContainerDied","Data":"aeb15585327532836ee254c934c17106b3e1b24f13d61e07592eb8bfd49c7e31"} Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.105859 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tb2rf" Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.105867 4910 scope.go:117] "RemoveContainer" containerID="9d1e5da4a64e61d2790049384850b6ecc24a7f8e5b7bb8890da723181feac35b" Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.133374 4910 scope.go:117] "RemoveContainer" containerID="292bb9681c21dbe68545ef9a976ede5182c957eb74893df7149e0da01001f982" Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.147786 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tb2rf"] Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.154004 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tb2rf"] Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.159358 4910 scope.go:117] "RemoveContainer" containerID="8eb6ea2a722f7ca11cf0da5abe731ee4a131bab06ef48015600eadb7bcb9402c" Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.183522 4910 scope.go:117] "RemoveContainer" containerID="9d1e5da4a64e61d2790049384850b6ecc24a7f8e5b7bb8890da723181feac35b" Jan 05 23:00:03 crc kubenswrapper[4910]: E0105 23:00:03.184089 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d1e5da4a64e61d2790049384850b6ecc24a7f8e5b7bb8890da723181feac35b\": container with ID starting with 9d1e5da4a64e61d2790049384850b6ecc24a7f8e5b7bb8890da723181feac35b not found: ID does not exist" containerID="9d1e5da4a64e61d2790049384850b6ecc24a7f8e5b7bb8890da723181feac35b" Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.184140 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d1e5da4a64e61d2790049384850b6ecc24a7f8e5b7bb8890da723181feac35b"} err="failed to get container status \"9d1e5da4a64e61d2790049384850b6ecc24a7f8e5b7bb8890da723181feac35b\": rpc error: code = NotFound desc = could not find container \"9d1e5da4a64e61d2790049384850b6ecc24a7f8e5b7bb8890da723181feac35b\": container with ID starting with 9d1e5da4a64e61d2790049384850b6ecc24a7f8e5b7bb8890da723181feac35b not found: ID does not exist" Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.184162 4910 scope.go:117] "RemoveContainer" containerID="292bb9681c21dbe68545ef9a976ede5182c957eb74893df7149e0da01001f982" Jan 05 23:00:03 crc kubenswrapper[4910]: E0105 23:00:03.187373 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"292bb9681c21dbe68545ef9a976ede5182c957eb74893df7149e0da01001f982\": container with ID starting with 292bb9681c21dbe68545ef9a976ede5182c957eb74893df7149e0da01001f982 not found: ID does not exist" containerID="292bb9681c21dbe68545ef9a976ede5182c957eb74893df7149e0da01001f982" Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.187423 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"292bb9681c21dbe68545ef9a976ede5182c957eb74893df7149e0da01001f982"} err="failed to get container status \"292bb9681c21dbe68545ef9a976ede5182c957eb74893df7149e0da01001f982\": rpc error: code = NotFound desc = could not find container \"292bb9681c21dbe68545ef9a976ede5182c957eb74893df7149e0da01001f982\": container with ID starting with 292bb9681c21dbe68545ef9a976ede5182c957eb74893df7149e0da01001f982 not found: ID does not exist" Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.187439 4910 scope.go:117] "RemoveContainer" 
containerID="8eb6ea2a722f7ca11cf0da5abe731ee4a131bab06ef48015600eadb7bcb9402c" Jan 05 23:00:03 crc kubenswrapper[4910]: E0105 23:00:03.187910 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8eb6ea2a722f7ca11cf0da5abe731ee4a131bab06ef48015600eadb7bcb9402c\": container with ID starting with 8eb6ea2a722f7ca11cf0da5abe731ee4a131bab06ef48015600eadb7bcb9402c not found: ID does not exist" containerID="8eb6ea2a722f7ca11cf0da5abe731ee4a131bab06ef48015600eadb7bcb9402c" Jan 05 23:00:03 crc kubenswrapper[4910]: I0105 23:00:03.187982 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8eb6ea2a722f7ca11cf0da5abe731ee4a131bab06ef48015600eadb7bcb9402c"} err="failed to get container status \"8eb6ea2a722f7ca11cf0da5abe731ee4a131bab06ef48015600eadb7bcb9402c\": rpc error: code = NotFound desc = could not find container \"8eb6ea2a722f7ca11cf0da5abe731ee4a131bab06ef48015600eadb7bcb9402c\": container with ID starting with 8eb6ea2a722f7ca11cf0da5abe731ee4a131bab06ef48015600eadb7bcb9402c not found: ID does not exist" Jan 05 23:00:04 crc kubenswrapper[4910]: I0105 23:00:04.434864 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" Jan 05 23:00:04 crc kubenswrapper[4910]: I0105 23:00:04.499378 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a43b047b-40b6-4c3d-aac2-eb352229d2c2-secret-volume\") pod \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\" (UID: \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\") " Jan 05 23:00:04 crc kubenswrapper[4910]: I0105 23:00:04.499745 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-88bf6\" (UniqueName: \"kubernetes.io/projected/a43b047b-40b6-4c3d-aac2-eb352229d2c2-kube-api-access-88bf6\") pod \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\" (UID: \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\") " Jan 05 23:00:04 crc kubenswrapper[4910]: I0105 23:00:04.499778 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a43b047b-40b6-4c3d-aac2-eb352229d2c2-config-volume\") pod \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\" (UID: \"a43b047b-40b6-4c3d-aac2-eb352229d2c2\") " Jan 05 23:00:04 crc kubenswrapper[4910]: I0105 23:00:04.500577 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a43b047b-40b6-4c3d-aac2-eb352229d2c2-config-volume" (OuterVolumeSpecName: "config-volume") pod "a43b047b-40b6-4c3d-aac2-eb352229d2c2" (UID: "a43b047b-40b6-4c3d-aac2-eb352229d2c2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:00:04 crc kubenswrapper[4910]: I0105 23:00:04.505197 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a43b047b-40b6-4c3d-aac2-eb352229d2c2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a43b047b-40b6-4c3d-aac2-eb352229d2c2" (UID: "a43b047b-40b6-4c3d-aac2-eb352229d2c2"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:00:04 crc kubenswrapper[4910]: I0105 23:00:04.507657 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a43b047b-40b6-4c3d-aac2-eb352229d2c2-kube-api-access-88bf6" (OuterVolumeSpecName: "kube-api-access-88bf6") pod "a43b047b-40b6-4c3d-aac2-eb352229d2c2" (UID: "a43b047b-40b6-4c3d-aac2-eb352229d2c2"). InnerVolumeSpecName "kube-api-access-88bf6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:00:04 crc kubenswrapper[4910]: I0105 23:00:04.602060 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a43b047b-40b6-4c3d-aac2-eb352229d2c2-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 05 23:00:04 crc kubenswrapper[4910]: I0105 23:00:04.602102 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-88bf6\" (UniqueName: \"kubernetes.io/projected/a43b047b-40b6-4c3d-aac2-eb352229d2c2-kube-api-access-88bf6\") on node \"crc\" DevicePath \"\"" Jan 05 23:00:04 crc kubenswrapper[4910]: I0105 23:00:04.602115 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a43b047b-40b6-4c3d-aac2-eb352229d2c2-config-volume\") on node \"crc\" DevicePath \"\"" Jan 05 23:00:04 crc kubenswrapper[4910]: I0105 23:00:04.742881 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" path="/var/lib/kubelet/pods/d7b1342f-1eb2-4a6b-a00a-45798ac4de9d/volumes" Jan 05 23:00:05 crc kubenswrapper[4910]: I0105 23:00:05.136879 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" event={"ID":"a43b047b-40b6-4c3d-aac2-eb352229d2c2","Type":"ContainerDied","Data":"9b0ab743218015c7c161483d23caf14bd1611df82cad7d8a3b7e983b38295120"} Jan 05 23:00:05 crc kubenswrapper[4910]: I0105 23:00:05.136938 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9b0ab743218015c7c161483d23caf14bd1611df82cad7d8a3b7e983b38295120" Jan 05 23:00:05 crc kubenswrapper[4910]: I0105 23:00:05.137020 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54" Jan 05 23:00:05 crc kubenswrapper[4910]: I0105 23:00:05.208274 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl"] Jan 05 23:00:05 crc kubenswrapper[4910]: I0105 23:00:05.213935 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460855-j2sjl"] Jan 05 23:00:06 crc kubenswrapper[4910]: I0105 23:00:06.733436 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5cfd0899-0cb5-4727-9c44-27799c5a2131" path="/var/lib/kubelet/pods/5cfd0899-0cb5-4727-9c44-27799c5a2131/volumes" Jan 05 23:00:14 crc kubenswrapper[4910]: I0105 23:00:14.722104 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 23:00:15 crc kubenswrapper[4910]: I0105 23:00:15.252996 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"4e7b32b882dd159d8555cffe618c0468a49206f8ed5fc41c354f16ebe332cf04"} Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.256357 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pvsvr"] Jan 05 23:00:20 crc kubenswrapper[4910]: E0105 23:00:20.261043 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" containerName="extract-content" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.261245 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" containerName="extract-content" Jan 05 23:00:20 crc kubenswrapper[4910]: E0105 23:00:20.261385 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" containerName="registry-server" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.261505 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" containerName="registry-server" Jan 05 23:00:20 crc kubenswrapper[4910]: E0105 23:00:20.261645 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a43b047b-40b6-4c3d-aac2-eb352229d2c2" containerName="collect-profiles" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.261761 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a43b047b-40b6-4c3d-aac2-eb352229d2c2" containerName="collect-profiles" Jan 05 23:00:20 crc kubenswrapper[4910]: E0105 23:00:20.261908 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" containerName="extract-utilities" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.262024 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" containerName="extract-utilities" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.262575 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7b1342f-1eb2-4a6b-a00a-45798ac4de9d" containerName="registry-server" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.262727 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a43b047b-40b6-4c3d-aac2-eb352229d2c2" containerName="collect-profiles" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.264793 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.287165 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pvsvr"] Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.433798 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjq2d\" (UniqueName: \"kubernetes.io/projected/da846630-7c91-491a-91ee-99181a0f813b-kube-api-access-rjq2d\") pod \"community-operators-pvsvr\" (UID: \"da846630-7c91-491a-91ee-99181a0f813b\") " pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.434182 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da846630-7c91-491a-91ee-99181a0f813b-utilities\") pod \"community-operators-pvsvr\" (UID: \"da846630-7c91-491a-91ee-99181a0f813b\") " pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.434242 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da846630-7c91-491a-91ee-99181a0f813b-catalog-content\") pod \"community-operators-pvsvr\" (UID: \"da846630-7c91-491a-91ee-99181a0f813b\") " pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.535948 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjq2d\" (UniqueName: \"kubernetes.io/projected/da846630-7c91-491a-91ee-99181a0f813b-kube-api-access-rjq2d\") pod \"community-operators-pvsvr\" (UID: \"da846630-7c91-491a-91ee-99181a0f813b\") " pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.536506 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da846630-7c91-491a-91ee-99181a0f813b-utilities\") pod \"community-operators-pvsvr\" (UID: \"da846630-7c91-491a-91ee-99181a0f813b\") " pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.536778 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da846630-7c91-491a-91ee-99181a0f813b-catalog-content\") pod \"community-operators-pvsvr\" (UID: \"da846630-7c91-491a-91ee-99181a0f813b\") " pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.537002 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da846630-7c91-491a-91ee-99181a0f813b-utilities\") pod \"community-operators-pvsvr\" (UID: \"da846630-7c91-491a-91ee-99181a0f813b\") " pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.537453 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da846630-7c91-491a-91ee-99181a0f813b-catalog-content\") pod \"community-operators-pvsvr\" (UID: \"da846630-7c91-491a-91ee-99181a0f813b\") " pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.570159 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rjq2d\" (UniqueName: \"kubernetes.io/projected/da846630-7c91-491a-91ee-99181a0f813b-kube-api-access-rjq2d\") pod \"community-operators-pvsvr\" (UID: \"da846630-7c91-491a-91ee-99181a0f813b\") " pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:20 crc kubenswrapper[4910]: I0105 23:00:20.629875 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:21 crc kubenswrapper[4910]: I0105 23:00:21.136015 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pvsvr"] Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.323040 4910 generic.go:334] "Generic (PLEG): container finished" podID="da846630-7c91-491a-91ee-99181a0f813b" containerID="5e7449bd9030a489db89a98a5ad46835d6756e882ef2d384f38209309f4d958b" exitCode=0 Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.323153 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pvsvr" event={"ID":"da846630-7c91-491a-91ee-99181a0f813b","Type":"ContainerDied","Data":"5e7449bd9030a489db89a98a5ad46835d6756e882ef2d384f38209309f4d958b"} Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.323594 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pvsvr" event={"ID":"da846630-7c91-491a-91ee-99181a0f813b","Type":"ContainerStarted","Data":"4043f220fc955bd2806cbe771114b5b92c49ad5d741526634fab5c215b2f8db4"} Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.653449 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d2tdd"] Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.655530 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.662486 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2tdd"] Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.697936 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/182638c6-e96c-4d6d-b186-f47220aa8fa9-utilities\") pod \"redhat-marketplace-d2tdd\" (UID: \"182638c6-e96c-4d6d-b186-f47220aa8fa9\") " pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.698056 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8r4cr\" (UniqueName: \"kubernetes.io/projected/182638c6-e96c-4d6d-b186-f47220aa8fa9-kube-api-access-8r4cr\") pod \"redhat-marketplace-d2tdd\" (UID: \"182638c6-e96c-4d6d-b186-f47220aa8fa9\") " pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.698179 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/182638c6-e96c-4d6d-b186-f47220aa8fa9-catalog-content\") pod \"redhat-marketplace-d2tdd\" (UID: \"182638c6-e96c-4d6d-b186-f47220aa8fa9\") " pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.799801 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/182638c6-e96c-4d6d-b186-f47220aa8fa9-catalog-content\") pod \"redhat-marketplace-d2tdd\" (UID: \"182638c6-e96c-4d6d-b186-f47220aa8fa9\") " pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.800553 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/182638c6-e96c-4d6d-b186-f47220aa8fa9-catalog-content\") pod \"redhat-marketplace-d2tdd\" (UID: \"182638c6-e96c-4d6d-b186-f47220aa8fa9\") " pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.800903 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/182638c6-e96c-4d6d-b186-f47220aa8fa9-utilities\") pod \"redhat-marketplace-d2tdd\" (UID: \"182638c6-e96c-4d6d-b186-f47220aa8fa9\") " pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.801268 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/182638c6-e96c-4d6d-b186-f47220aa8fa9-utilities\") pod \"redhat-marketplace-d2tdd\" (UID: \"182638c6-e96c-4d6d-b186-f47220aa8fa9\") " pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.801353 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8r4cr\" (UniqueName: \"kubernetes.io/projected/182638c6-e96c-4d6d-b186-f47220aa8fa9-kube-api-access-8r4cr\") pod \"redhat-marketplace-d2tdd\" (UID: \"182638c6-e96c-4d6d-b186-f47220aa8fa9\") " pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.838098 4910 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-8r4cr\" (UniqueName: \"kubernetes.io/projected/182638c6-e96c-4d6d-b186-f47220aa8fa9-kube-api-access-8r4cr\") pod \"redhat-marketplace-d2tdd\" (UID: \"182638c6-e96c-4d6d-b186-f47220aa8fa9\") " pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:22 crc kubenswrapper[4910]: I0105 23:00:22.997407 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:23 crc kubenswrapper[4910]: I0105 23:00:23.310967 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2tdd"] Jan 05 23:00:23 crc kubenswrapper[4910]: W0105 23:00:23.317719 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod182638c6_e96c_4d6d_b186_f47220aa8fa9.slice/crio-f180b88082af83dde465ac4d5b1977a3dacf67f687e72fc3c40dce7455706b2d WatchSource:0}: Error finding container f180b88082af83dde465ac4d5b1977a3dacf67f687e72fc3c40dce7455706b2d: Status 404 returned error can't find the container with id f180b88082af83dde465ac4d5b1977a3dacf67f687e72fc3c40dce7455706b2d Jan 05 23:00:23 crc kubenswrapper[4910]: I0105 23:00:23.333970 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2tdd" event={"ID":"182638c6-e96c-4d6d-b186-f47220aa8fa9","Type":"ContainerStarted","Data":"f180b88082af83dde465ac4d5b1977a3dacf67f687e72fc3c40dce7455706b2d"} Jan 05 23:00:24 crc kubenswrapper[4910]: I0105 23:00:24.344763 4910 generic.go:334] "Generic (PLEG): container finished" podID="da846630-7c91-491a-91ee-99181a0f813b" containerID="ce94dc49efb03919d548d5dc4222dc9c1dfde3daf37cb6bf73f31851ffe9efc0" exitCode=0 Jan 05 23:00:24 crc kubenswrapper[4910]: I0105 23:00:24.344881 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pvsvr" event={"ID":"da846630-7c91-491a-91ee-99181a0f813b","Type":"ContainerDied","Data":"ce94dc49efb03919d548d5dc4222dc9c1dfde3daf37cb6bf73f31851ffe9efc0"} Jan 05 23:00:24 crc kubenswrapper[4910]: I0105 23:00:24.349280 4910 generic.go:334] "Generic (PLEG): container finished" podID="182638c6-e96c-4d6d-b186-f47220aa8fa9" containerID="07e59f54b49ef9cadf0cb908cae058d1fb7a15b0f37c2be6c0ec67c7d5a7a6d4" exitCode=0 Jan 05 23:00:24 crc kubenswrapper[4910]: I0105 23:00:24.349362 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2tdd" event={"ID":"182638c6-e96c-4d6d-b186-f47220aa8fa9","Type":"ContainerDied","Data":"07e59f54b49ef9cadf0cb908cae058d1fb7a15b0f37c2be6c0ec67c7d5a7a6d4"} Jan 05 23:00:25 crc kubenswrapper[4910]: I0105 23:00:25.362254 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2tdd" event={"ID":"182638c6-e96c-4d6d-b186-f47220aa8fa9","Type":"ContainerStarted","Data":"a6341532d030312453e65c03ac772bae3cd2de15a03d5a088a0e2a1c1abac070"} Jan 05 23:00:25 crc kubenswrapper[4910]: I0105 23:00:25.364368 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pvsvr" event={"ID":"da846630-7c91-491a-91ee-99181a0f813b","Type":"ContainerStarted","Data":"a37dceeaa30181ad7a741a49ed17a772415e3a306873d6805e55fd73f7d9c5a6"} Jan 05 23:00:25 crc kubenswrapper[4910]: I0105 23:00:25.413027 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pvsvr" podStartSLOduration=2.967744678 
podStartE2EDuration="5.413000844s" podCreationTimestamp="2026-01-05 23:00:20 +0000 UTC" firstStartedPulling="2026-01-05 23:00:22.327468527 +0000 UTC m=+4153.904966197" lastFinishedPulling="2026-01-05 23:00:24.772724693 +0000 UTC m=+4156.350222363" observedRunningTime="2026-01-05 23:00:25.407598381 +0000 UTC m=+4156.985096051" watchObservedRunningTime="2026-01-05 23:00:25.413000844 +0000 UTC m=+4156.990498514" Jan 05 23:00:26 crc kubenswrapper[4910]: I0105 23:00:26.377158 4910 generic.go:334] "Generic (PLEG): container finished" podID="182638c6-e96c-4d6d-b186-f47220aa8fa9" containerID="a6341532d030312453e65c03ac772bae3cd2de15a03d5a088a0e2a1c1abac070" exitCode=0 Jan 05 23:00:26 crc kubenswrapper[4910]: I0105 23:00:26.377311 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2tdd" event={"ID":"182638c6-e96c-4d6d-b186-f47220aa8fa9","Type":"ContainerDied","Data":"a6341532d030312453e65c03ac772bae3cd2de15a03d5a088a0e2a1c1abac070"} Jan 05 23:00:27 crc kubenswrapper[4910]: I0105 23:00:27.390186 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2tdd" event={"ID":"182638c6-e96c-4d6d-b186-f47220aa8fa9","Type":"ContainerStarted","Data":"67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b"} Jan 05 23:00:27 crc kubenswrapper[4910]: I0105 23:00:27.423354 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d2tdd" podStartSLOduration=2.840328093 podStartE2EDuration="5.423309749s" podCreationTimestamp="2026-01-05 23:00:22 +0000 UTC" firstStartedPulling="2026-01-05 23:00:24.350990949 +0000 UTC m=+4155.928488619" lastFinishedPulling="2026-01-05 23:00:26.933972595 +0000 UTC m=+4158.511470275" observedRunningTime="2026-01-05 23:00:27.413865966 +0000 UTC m=+4158.991363676" watchObservedRunningTime="2026-01-05 23:00:27.423309749 +0000 UTC m=+4159.000807459" Jan 05 23:00:30 crc kubenswrapper[4910]: I0105 23:00:30.631794 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:30 crc kubenswrapper[4910]: I0105 23:00:30.632306 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:30 crc kubenswrapper[4910]: I0105 23:00:30.690042 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:31 crc kubenswrapper[4910]: I0105 23:00:31.544395 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:31 crc kubenswrapper[4910]: I0105 23:00:31.845886 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pvsvr"] Jan 05 23:00:32 crc kubenswrapper[4910]: I0105 23:00:32.997859 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:32 crc kubenswrapper[4910]: I0105 23:00:32.998543 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:33 crc kubenswrapper[4910]: I0105 23:00:33.077373 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:33 crc kubenswrapper[4910]: I0105 23:00:33.489779 4910 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-marketplace/community-operators-pvsvr" podUID="da846630-7c91-491a-91ee-99181a0f813b" containerName="registry-server" containerID="cri-o://a37dceeaa30181ad7a741a49ed17a772415e3a306873d6805e55fd73f7d9c5a6" gracePeriod=2 Jan 05 23:00:33 crc kubenswrapper[4910]: I0105 23:00:33.560184 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:34 crc kubenswrapper[4910]: I0105 23:00:34.246566 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2tdd"] Jan 05 23:00:34 crc kubenswrapper[4910]: I0105 23:00:34.507041 4910 generic.go:334] "Generic (PLEG): container finished" podID="da846630-7c91-491a-91ee-99181a0f813b" containerID="a37dceeaa30181ad7a741a49ed17a772415e3a306873d6805e55fd73f7d9c5a6" exitCode=0 Jan 05 23:00:34 crc kubenswrapper[4910]: I0105 23:00:34.508194 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pvsvr" event={"ID":"da846630-7c91-491a-91ee-99181a0f813b","Type":"ContainerDied","Data":"a37dceeaa30181ad7a741a49ed17a772415e3a306873d6805e55fd73f7d9c5a6"} Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.093532 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.165066 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjq2d\" (UniqueName: \"kubernetes.io/projected/da846630-7c91-491a-91ee-99181a0f813b-kube-api-access-rjq2d\") pod \"da846630-7c91-491a-91ee-99181a0f813b\" (UID: \"da846630-7c91-491a-91ee-99181a0f813b\") " Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.165271 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da846630-7c91-491a-91ee-99181a0f813b-utilities\") pod \"da846630-7c91-491a-91ee-99181a0f813b\" (UID: \"da846630-7c91-491a-91ee-99181a0f813b\") " Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.165373 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da846630-7c91-491a-91ee-99181a0f813b-catalog-content\") pod \"da846630-7c91-491a-91ee-99181a0f813b\" (UID: \"da846630-7c91-491a-91ee-99181a0f813b\") " Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.166715 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da846630-7c91-491a-91ee-99181a0f813b-utilities" (OuterVolumeSpecName: "utilities") pod "da846630-7c91-491a-91ee-99181a0f813b" (UID: "da846630-7c91-491a-91ee-99181a0f813b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.174597 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da846630-7c91-491a-91ee-99181a0f813b-kube-api-access-rjq2d" (OuterVolumeSpecName: "kube-api-access-rjq2d") pod "da846630-7c91-491a-91ee-99181a0f813b" (UID: "da846630-7c91-491a-91ee-99181a0f813b"). InnerVolumeSpecName "kube-api-access-rjq2d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.175165 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da846630-7c91-491a-91ee-99181a0f813b-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.175204 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjq2d\" (UniqueName: \"kubernetes.io/projected/da846630-7c91-491a-91ee-99181a0f813b-kube-api-access-rjq2d\") on node \"crc\" DevicePath \"\"" Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.260543 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da846630-7c91-491a-91ee-99181a0f813b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "da846630-7c91-491a-91ee-99181a0f813b" (UID: "da846630-7c91-491a-91ee-99181a0f813b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.275786 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da846630-7c91-491a-91ee-99181a0f813b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.527990 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pvsvr" event={"ID":"da846630-7c91-491a-91ee-99181a0f813b","Type":"ContainerDied","Data":"4043f220fc955bd2806cbe771114b5b92c49ad5d741526634fab5c215b2f8db4"} Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.528087 4910 scope.go:117] "RemoveContainer" containerID="a37dceeaa30181ad7a741a49ed17a772415e3a306873d6805e55fd73f7d9c5a6" Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.528186 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pvsvr" Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.528069 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d2tdd" podUID="182638c6-e96c-4d6d-b186-f47220aa8fa9" containerName="registry-server" containerID="cri-o://67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b" gracePeriod=2 Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.561265 4910 scope.go:117] "RemoveContainer" containerID="ce94dc49efb03919d548d5dc4222dc9c1dfde3daf37cb6bf73f31851ffe9efc0" Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.581843 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pvsvr"] Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.596940 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pvsvr"] Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.699737 4910 scope.go:117] "RemoveContainer" containerID="5e7449bd9030a489db89a98a5ad46835d6756e882ef2d384f38209309f4d958b" Jan 05 23:00:35 crc kubenswrapper[4910]: E0105 23:00:35.699975 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod182638c6_e96c_4d6d_b186_f47220aa8fa9.slice/crio-67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda846630_7c91_491a_91ee_99181a0f813b.slice\": RecentStats: unable to find data in memory cache]" Jan 05 23:00:35 crc kubenswrapper[4910]: I0105 23:00:35.999484 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.093012 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/182638c6-e96c-4d6d-b186-f47220aa8fa9-utilities\") pod \"182638c6-e96c-4d6d-b186-f47220aa8fa9\" (UID: \"182638c6-e96c-4d6d-b186-f47220aa8fa9\") " Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.093485 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/182638c6-e96c-4d6d-b186-f47220aa8fa9-catalog-content\") pod \"182638c6-e96c-4d6d-b186-f47220aa8fa9\" (UID: \"182638c6-e96c-4d6d-b186-f47220aa8fa9\") " Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.093664 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8r4cr\" (UniqueName: \"kubernetes.io/projected/182638c6-e96c-4d6d-b186-f47220aa8fa9-kube-api-access-8r4cr\") pod \"182638c6-e96c-4d6d-b186-f47220aa8fa9\" (UID: \"182638c6-e96c-4d6d-b186-f47220aa8fa9\") " Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.095848 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/182638c6-e96c-4d6d-b186-f47220aa8fa9-utilities" (OuterVolumeSpecName: "utilities") pod "182638c6-e96c-4d6d-b186-f47220aa8fa9" (UID: "182638c6-e96c-4d6d-b186-f47220aa8fa9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.099955 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/182638c6-e96c-4d6d-b186-f47220aa8fa9-kube-api-access-8r4cr" (OuterVolumeSpecName: "kube-api-access-8r4cr") pod "182638c6-e96c-4d6d-b186-f47220aa8fa9" (UID: "182638c6-e96c-4d6d-b186-f47220aa8fa9"). InnerVolumeSpecName "kube-api-access-8r4cr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.116171 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/182638c6-e96c-4d6d-b186-f47220aa8fa9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "182638c6-e96c-4d6d-b186-f47220aa8fa9" (UID: "182638c6-e96c-4d6d-b186-f47220aa8fa9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.195504 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/182638c6-e96c-4d6d-b186-f47220aa8fa9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.195565 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8r4cr\" (UniqueName: \"kubernetes.io/projected/182638c6-e96c-4d6d-b186-f47220aa8fa9-kube-api-access-8r4cr\") on node \"crc\" DevicePath \"\"" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.195584 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/182638c6-e96c-4d6d-b186-f47220aa8fa9-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.542235 4910 generic.go:334] "Generic (PLEG): container finished" podID="182638c6-e96c-4d6d-b186-f47220aa8fa9" containerID="67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b" exitCode=0 Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.542308 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2tdd" event={"ID":"182638c6-e96c-4d6d-b186-f47220aa8fa9","Type":"ContainerDied","Data":"67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b"} Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.542382 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2tdd" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.542415 4910 scope.go:117] "RemoveContainer" containerID="67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.542393 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2tdd" event={"ID":"182638c6-e96c-4d6d-b186-f47220aa8fa9","Type":"ContainerDied","Data":"f180b88082af83dde465ac4d5b1977a3dacf67f687e72fc3c40dce7455706b2d"} Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.575018 4910 scope.go:117] "RemoveContainer" containerID="a6341532d030312453e65c03ac772bae3cd2de15a03d5a088a0e2a1c1abac070" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.591627 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2tdd"] Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.598213 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2tdd"] Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.634181 4910 scope.go:117] "RemoveContainer" containerID="07e59f54b49ef9cadf0cb908cae058d1fb7a15b0f37c2be6c0ec67c7d5a7a6d4" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.657709 4910 scope.go:117] "RemoveContainer" containerID="67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b" Jan 05 23:00:36 crc kubenswrapper[4910]: E0105 23:00:36.658330 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b\": container with ID starting with 67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b not found: ID does not exist" containerID="67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.658428 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b"} err="failed to get container status \"67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b\": rpc error: code = NotFound desc = could not find container \"67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b\": container with ID starting with 67e1fa8a6f427817bb59cb791fe01e3cecde05e3d995cc2fd24ba6b9846f1f8b not found: ID does not exist" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.658479 4910 scope.go:117] "RemoveContainer" containerID="a6341532d030312453e65c03ac772bae3cd2de15a03d5a088a0e2a1c1abac070" Jan 05 23:00:36 crc kubenswrapper[4910]: E0105 23:00:36.659163 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6341532d030312453e65c03ac772bae3cd2de15a03d5a088a0e2a1c1abac070\": container with ID starting with a6341532d030312453e65c03ac772bae3cd2de15a03d5a088a0e2a1c1abac070 not found: ID does not exist" containerID="a6341532d030312453e65c03ac772bae3cd2de15a03d5a088a0e2a1c1abac070" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.659211 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6341532d030312453e65c03ac772bae3cd2de15a03d5a088a0e2a1c1abac070"} err="failed to get container status \"a6341532d030312453e65c03ac772bae3cd2de15a03d5a088a0e2a1c1abac070\": rpc error: code = NotFound desc = could not find 
container \"a6341532d030312453e65c03ac772bae3cd2de15a03d5a088a0e2a1c1abac070\": container with ID starting with a6341532d030312453e65c03ac772bae3cd2de15a03d5a088a0e2a1c1abac070 not found: ID does not exist" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.659242 4910 scope.go:117] "RemoveContainer" containerID="07e59f54b49ef9cadf0cb908cae058d1fb7a15b0f37c2be6c0ec67c7d5a7a6d4" Jan 05 23:00:36 crc kubenswrapper[4910]: E0105 23:00:36.659740 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07e59f54b49ef9cadf0cb908cae058d1fb7a15b0f37c2be6c0ec67c7d5a7a6d4\": container with ID starting with 07e59f54b49ef9cadf0cb908cae058d1fb7a15b0f37c2be6c0ec67c7d5a7a6d4 not found: ID does not exist" containerID="07e59f54b49ef9cadf0cb908cae058d1fb7a15b0f37c2be6c0ec67c7d5a7a6d4" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.659939 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07e59f54b49ef9cadf0cb908cae058d1fb7a15b0f37c2be6c0ec67c7d5a7a6d4"} err="failed to get container status \"07e59f54b49ef9cadf0cb908cae058d1fb7a15b0f37c2be6c0ec67c7d5a7a6d4\": rpc error: code = NotFound desc = could not find container \"07e59f54b49ef9cadf0cb908cae058d1fb7a15b0f37c2be6c0ec67c7d5a7a6d4\": container with ID starting with 07e59f54b49ef9cadf0cb908cae058d1fb7a15b0f37c2be6c0ec67c7d5a7a6d4 not found: ID does not exist" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.739658 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="182638c6-e96c-4d6d-b186-f47220aa8fa9" path="/var/lib/kubelet/pods/182638c6-e96c-4d6d-b186-f47220aa8fa9/volumes" Jan 05 23:00:36 crc kubenswrapper[4910]: I0105 23:00:36.740553 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da846630-7c91-491a-91ee-99181a0f813b" path="/var/lib/kubelet/pods/da846630-7c91-491a-91ee-99181a0f813b/volumes" Jan 05 23:00:43 crc kubenswrapper[4910]: I0105 23:00:43.698449 4910 scope.go:117] "RemoveContainer" containerID="5b446a829202ed8061be13c123b2bf6669781ba6ae975a5b2f50d239038094da" Jan 05 23:02:40 crc kubenswrapper[4910]: I0105 23:02:40.952460 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:02:40 crc kubenswrapper[4910]: I0105 23:02:40.953435 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:03:10 crc kubenswrapper[4910]: I0105 23:03:10.953233 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:03:10 crc kubenswrapper[4910]: I0105 23:03:10.954025 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:03:40 crc kubenswrapper[4910]: I0105 23:03:40.952732 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:03:40 crc kubenswrapper[4910]: I0105 23:03:40.953707 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:03:40 crc kubenswrapper[4910]: I0105 23:03:40.953777 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 23:03:40 crc kubenswrapper[4910]: I0105 23:03:40.954627 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4e7b32b882dd159d8555cffe618c0468a49206f8ed5fc41c354f16ebe332cf04"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 23:03:40 crc kubenswrapper[4910]: I0105 23:03:40.954680 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://4e7b32b882dd159d8555cffe618c0468a49206f8ed5fc41c354f16ebe332cf04" gracePeriod=600 Jan 05 23:03:41 crc kubenswrapper[4910]: I0105 23:03:41.406581 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="4e7b32b882dd159d8555cffe618c0468a49206f8ed5fc41c354f16ebe332cf04" exitCode=0 Jan 05 23:03:41 crc kubenswrapper[4910]: I0105 23:03:41.406671 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"4e7b32b882dd159d8555cffe618c0468a49206f8ed5fc41c354f16ebe332cf04"} Jan 05 23:03:41 crc kubenswrapper[4910]: I0105 23:03:41.407186 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d"} Jan 05 23:03:41 crc kubenswrapper[4910]: I0105 23:03:41.407210 4910 scope.go:117] "RemoveContainer" containerID="ad49db6d7400b47b6d444963f4908f27fa4dbe9bca288c41277849995e7bf497" Jan 05 23:06:10 crc kubenswrapper[4910]: I0105 23:06:10.952102 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:06:10 crc kubenswrapper[4910]: I0105 23:06:10.952863 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" 
podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:06:40 crc kubenswrapper[4910]: I0105 23:06:40.952190 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:06:40 crc kubenswrapper[4910]: I0105 23:06:40.953477 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.793781 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-4dq9c"] Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.800770 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-4dq9c"] Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.927028 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-z7mld"] Jan 05 23:06:59 crc kubenswrapper[4910]: E0105 23:06:59.928031 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da846630-7c91-491a-91ee-99181a0f813b" containerName="registry-server" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.928059 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="da846630-7c91-491a-91ee-99181a0f813b" containerName="registry-server" Jan 05 23:06:59 crc kubenswrapper[4910]: E0105 23:06:59.928086 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="182638c6-e96c-4d6d-b186-f47220aa8fa9" containerName="extract-utilities" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.928095 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="182638c6-e96c-4d6d-b186-f47220aa8fa9" containerName="extract-utilities" Jan 05 23:06:59 crc kubenswrapper[4910]: E0105 23:06:59.928140 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="182638c6-e96c-4d6d-b186-f47220aa8fa9" containerName="registry-server" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.928151 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="182638c6-e96c-4d6d-b186-f47220aa8fa9" containerName="registry-server" Jan 05 23:06:59 crc kubenswrapper[4910]: E0105 23:06:59.928163 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da846630-7c91-491a-91ee-99181a0f813b" containerName="extract-utilities" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.928173 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="da846630-7c91-491a-91ee-99181a0f813b" containerName="extract-utilities" Jan 05 23:06:59 crc kubenswrapper[4910]: E0105 23:06:59.928192 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="182638c6-e96c-4d6d-b186-f47220aa8fa9" containerName="extract-content" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.928199 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="182638c6-e96c-4d6d-b186-f47220aa8fa9" containerName="extract-content" Jan 05 23:06:59 crc kubenswrapper[4910]: E0105 23:06:59.928224 4910 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="da846630-7c91-491a-91ee-99181a0f813b" containerName="extract-content" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.928234 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="da846630-7c91-491a-91ee-99181a0f813b" containerName="extract-content" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.928407 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="da846630-7c91-491a-91ee-99181a0f813b" containerName="registry-server" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.928434 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="182638c6-e96c-4d6d-b186-f47220aa8fa9" containerName="registry-server" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.928987 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-z7mld" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.931200 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.932252 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.932511 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.941944 4910 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-wtn8h" Jan 05 23:06:59 crc kubenswrapper[4910]: I0105 23:06:59.944348 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-z7mld"] Jan 05 23:07:00 crc kubenswrapper[4910]: I0105 23:07:00.070318 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-crc-storage\") pod \"crc-storage-crc-z7mld\" (UID: \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\") " pod="crc-storage/crc-storage-crc-z7mld" Jan 05 23:07:00 crc kubenswrapper[4910]: I0105 23:07:00.070822 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-node-mnt\") pod \"crc-storage-crc-z7mld\" (UID: \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\") " pod="crc-storage/crc-storage-crc-z7mld" Jan 05 23:07:00 crc kubenswrapper[4910]: I0105 23:07:00.070966 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnfb7\" (UniqueName: \"kubernetes.io/projected/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-kube-api-access-gnfb7\") pod \"crc-storage-crc-z7mld\" (UID: \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\") " pod="crc-storage/crc-storage-crc-z7mld" Jan 05 23:07:00 crc kubenswrapper[4910]: I0105 23:07:00.172608 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-crc-storage\") pod \"crc-storage-crc-z7mld\" (UID: \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\") " pod="crc-storage/crc-storage-crc-z7mld" Jan 05 23:07:00 crc kubenswrapper[4910]: I0105 23:07:00.172835 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-node-mnt\") pod \"crc-storage-crc-z7mld\" (UID: 
\"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\") " pod="crc-storage/crc-storage-crc-z7mld" Jan 05 23:07:00 crc kubenswrapper[4910]: I0105 23:07:00.172883 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnfb7\" (UniqueName: \"kubernetes.io/projected/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-kube-api-access-gnfb7\") pod \"crc-storage-crc-z7mld\" (UID: \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\") " pod="crc-storage/crc-storage-crc-z7mld" Jan 05 23:07:00 crc kubenswrapper[4910]: I0105 23:07:00.173715 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-node-mnt\") pod \"crc-storage-crc-z7mld\" (UID: \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\") " pod="crc-storage/crc-storage-crc-z7mld" Jan 05 23:07:00 crc kubenswrapper[4910]: I0105 23:07:00.174415 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-crc-storage\") pod \"crc-storage-crc-z7mld\" (UID: \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\") " pod="crc-storage/crc-storage-crc-z7mld" Jan 05 23:07:00 crc kubenswrapper[4910]: I0105 23:07:00.204082 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnfb7\" (UniqueName: \"kubernetes.io/projected/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-kube-api-access-gnfb7\") pod \"crc-storage-crc-z7mld\" (UID: \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\") " pod="crc-storage/crc-storage-crc-z7mld" Jan 05 23:07:00 crc kubenswrapper[4910]: I0105 23:07:00.251081 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-z7mld" Jan 05 23:07:00 crc kubenswrapper[4910]: I0105 23:07:00.732710 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b5c5e69-e0b4-4616-8284-99ca77b66846" path="/var/lib/kubelet/pods/1b5c5e69-e0b4-4616-8284-99ca77b66846/volumes" Jan 05 23:07:00 crc kubenswrapper[4910]: I0105 23:07:00.762018 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-z7mld"] Jan 05 23:07:00 crc kubenswrapper[4910]: I0105 23:07:00.776673 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 23:07:01 crc kubenswrapper[4910]: I0105 23:07:01.689194 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-z7mld" event={"ID":"b8dbe479-e4d9-46e2-8efc-d862330f7f7c","Type":"ContainerStarted","Data":"44887f87bbfb4aade5ee69863fc1a9c3b25c7a9b05150fed19a703fdd1fcbb0f"} Jan 05 23:07:02 crc kubenswrapper[4910]: I0105 23:07:02.699350 4910 generic.go:334] "Generic (PLEG): container finished" podID="b8dbe479-e4d9-46e2-8efc-d862330f7f7c" containerID="ea48581f0cc67ccbdd71a320a19b6619bd0ba996b299034de9c35b5b3f514f4e" exitCode=0 Jan 05 23:07:02 crc kubenswrapper[4910]: I0105 23:07:02.699457 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-z7mld" event={"ID":"b8dbe479-e4d9-46e2-8efc-d862330f7f7c","Type":"ContainerDied","Data":"ea48581f0cc67ccbdd71a320a19b6619bd0ba996b299034de9c35b5b3f514f4e"} Jan 05 23:07:04 crc kubenswrapper[4910]: I0105 23:07:04.130215 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-z7mld" Jan 05 23:07:04 crc kubenswrapper[4910]: I0105 23:07:04.248912 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnfb7\" (UniqueName: \"kubernetes.io/projected/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-kube-api-access-gnfb7\") pod \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\" (UID: \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\") " Jan 05 23:07:04 crc kubenswrapper[4910]: I0105 23:07:04.249100 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-node-mnt\") pod \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\" (UID: \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\") " Jan 05 23:07:04 crc kubenswrapper[4910]: I0105 23:07:04.249182 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-crc-storage\") pod \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\" (UID: \"b8dbe479-e4d9-46e2-8efc-d862330f7f7c\") " Jan 05 23:07:04 crc kubenswrapper[4910]: I0105 23:07:04.249341 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "b8dbe479-e4d9-46e2-8efc-d862330f7f7c" (UID: "b8dbe479-e4d9-46e2-8efc-d862330f7f7c"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 23:07:04 crc kubenswrapper[4910]: I0105 23:07:04.250011 4910 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 05 23:07:04 crc kubenswrapper[4910]: I0105 23:07:04.257532 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-kube-api-access-gnfb7" (OuterVolumeSpecName: "kube-api-access-gnfb7") pod "b8dbe479-e4d9-46e2-8efc-d862330f7f7c" (UID: "b8dbe479-e4d9-46e2-8efc-d862330f7f7c"). InnerVolumeSpecName "kube-api-access-gnfb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:07:04 crc kubenswrapper[4910]: I0105 23:07:04.282993 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "b8dbe479-e4d9-46e2-8efc-d862330f7f7c" (UID: "b8dbe479-e4d9-46e2-8efc-d862330f7f7c"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:07:04 crc kubenswrapper[4910]: I0105 23:07:04.351422 4910 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 05 23:07:04 crc kubenswrapper[4910]: I0105 23:07:04.351485 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnfb7\" (UniqueName: \"kubernetes.io/projected/b8dbe479-e4d9-46e2-8efc-d862330f7f7c-kube-api-access-gnfb7\") on node \"crc\" DevicePath \"\"" Jan 05 23:07:04 crc kubenswrapper[4910]: I0105 23:07:04.716177 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-z7mld" event={"ID":"b8dbe479-e4d9-46e2-8efc-d862330f7f7c","Type":"ContainerDied","Data":"44887f87bbfb4aade5ee69863fc1a9c3b25c7a9b05150fed19a703fdd1fcbb0f"} Jan 05 23:07:04 crc kubenswrapper[4910]: I0105 23:07:04.716234 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="44887f87bbfb4aade5ee69863fc1a9c3b25c7a9b05150fed19a703fdd1fcbb0f" Jan 05 23:07:04 crc kubenswrapper[4910]: I0105 23:07:04.716239 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-z7mld" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.497346 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-z7mld"] Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.505292 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-z7mld"] Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.623638 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-nmc9j"] Jan 05 23:07:06 crc kubenswrapper[4910]: E0105 23:07:06.623988 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8dbe479-e4d9-46e2-8efc-d862330f7f7c" containerName="storage" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.624008 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8dbe479-e4d9-46e2-8efc-d862330f7f7c" containerName="storage" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.624168 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8dbe479-e4d9-46e2-8efc-d862330f7f7c" containerName="storage" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.624884 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-nmc9j" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.630868 4910 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-wtn8h" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.631152 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.631288 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.631406 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.645991 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-nmc9j"] Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.690722 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/cfecba67-1f70-4572-88ac-fe9aab717b5d-crc-storage\") pod \"crc-storage-crc-nmc9j\" (UID: \"cfecba67-1f70-4572-88ac-fe9aab717b5d\") " pod="crc-storage/crc-storage-crc-nmc9j" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.690827 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzp95\" (UniqueName: \"kubernetes.io/projected/cfecba67-1f70-4572-88ac-fe9aab717b5d-kube-api-access-tzp95\") pod \"crc-storage-crc-nmc9j\" (UID: \"cfecba67-1f70-4572-88ac-fe9aab717b5d\") " pod="crc-storage/crc-storage-crc-nmc9j" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.690868 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/cfecba67-1f70-4572-88ac-fe9aab717b5d-node-mnt\") pod \"crc-storage-crc-nmc9j\" (UID: \"cfecba67-1f70-4572-88ac-fe9aab717b5d\") " pod="crc-storage/crc-storage-crc-nmc9j" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.737496 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8dbe479-e4d9-46e2-8efc-d862330f7f7c" path="/var/lib/kubelet/pods/b8dbe479-e4d9-46e2-8efc-d862330f7f7c/volumes" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.792232 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/cfecba67-1f70-4572-88ac-fe9aab717b5d-crc-storage\") pod \"crc-storage-crc-nmc9j\" (UID: \"cfecba67-1f70-4572-88ac-fe9aab717b5d\") " pod="crc-storage/crc-storage-crc-nmc9j" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.792315 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzp95\" (UniqueName: \"kubernetes.io/projected/cfecba67-1f70-4572-88ac-fe9aab717b5d-kube-api-access-tzp95\") pod \"crc-storage-crc-nmc9j\" (UID: \"cfecba67-1f70-4572-88ac-fe9aab717b5d\") " pod="crc-storage/crc-storage-crc-nmc9j" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.792349 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/cfecba67-1f70-4572-88ac-fe9aab717b5d-node-mnt\") pod \"crc-storage-crc-nmc9j\" (UID: \"cfecba67-1f70-4572-88ac-fe9aab717b5d\") " pod="crc-storage/crc-storage-crc-nmc9j" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.792542 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/cfecba67-1f70-4572-88ac-fe9aab717b5d-node-mnt\") pod \"crc-storage-crc-nmc9j\" (UID: \"cfecba67-1f70-4572-88ac-fe9aab717b5d\") " pod="crc-storage/crc-storage-crc-nmc9j" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.793349 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/cfecba67-1f70-4572-88ac-fe9aab717b5d-crc-storage\") pod \"crc-storage-crc-nmc9j\" (UID: \"cfecba67-1f70-4572-88ac-fe9aab717b5d\") " pod="crc-storage/crc-storage-crc-nmc9j" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.825069 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzp95\" (UniqueName: \"kubernetes.io/projected/cfecba67-1f70-4572-88ac-fe9aab717b5d-kube-api-access-tzp95\") pod \"crc-storage-crc-nmc9j\" (UID: \"cfecba67-1f70-4572-88ac-fe9aab717b5d\") " pod="crc-storage/crc-storage-crc-nmc9j" Jan 05 23:07:06 crc kubenswrapper[4910]: I0105 23:07:06.944500 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nmc9j" Jan 05 23:07:07 crc kubenswrapper[4910]: I0105 23:07:07.415530 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-nmc9j"] Jan 05 23:07:07 crc kubenswrapper[4910]: I0105 23:07:07.742011 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-nmc9j" event={"ID":"cfecba67-1f70-4572-88ac-fe9aab717b5d","Type":"ContainerStarted","Data":"828e168d71713509a23c9eddde0d6c8fa5003c7aefb7929463866d3de74960e0"} Jan 05 23:07:08 crc kubenswrapper[4910]: I0105 23:07:08.758385 4910 generic.go:334] "Generic (PLEG): container finished" podID="cfecba67-1f70-4572-88ac-fe9aab717b5d" containerID="4bc12885be92d50dda7a8c48ce7ba6ee02f01ccf8e66c2a083436b2b3c34222a" exitCode=0 Jan 05 23:07:08 crc kubenswrapper[4910]: I0105 23:07:08.758844 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-nmc9j" event={"ID":"cfecba67-1f70-4572-88ac-fe9aab717b5d","Type":"ContainerDied","Data":"4bc12885be92d50dda7a8c48ce7ba6ee02f01ccf8e66c2a083436b2b3c34222a"} Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.081420 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-nmc9j" Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.147164 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzp95\" (UniqueName: \"kubernetes.io/projected/cfecba67-1f70-4572-88ac-fe9aab717b5d-kube-api-access-tzp95\") pod \"cfecba67-1f70-4572-88ac-fe9aab717b5d\" (UID: \"cfecba67-1f70-4572-88ac-fe9aab717b5d\") " Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.147289 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/cfecba67-1f70-4572-88ac-fe9aab717b5d-node-mnt\") pod \"cfecba67-1f70-4572-88ac-fe9aab717b5d\" (UID: \"cfecba67-1f70-4572-88ac-fe9aab717b5d\") " Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.147349 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/cfecba67-1f70-4572-88ac-fe9aab717b5d-crc-storage\") pod \"cfecba67-1f70-4572-88ac-fe9aab717b5d\" (UID: \"cfecba67-1f70-4572-88ac-fe9aab717b5d\") " Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.147465 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cfecba67-1f70-4572-88ac-fe9aab717b5d-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "cfecba67-1f70-4572-88ac-fe9aab717b5d" (UID: "cfecba67-1f70-4572-88ac-fe9aab717b5d"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.148214 4910 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/cfecba67-1f70-4572-88ac-fe9aab717b5d-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.153328 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfecba67-1f70-4572-88ac-fe9aab717b5d-kube-api-access-tzp95" (OuterVolumeSpecName: "kube-api-access-tzp95") pod "cfecba67-1f70-4572-88ac-fe9aab717b5d" (UID: "cfecba67-1f70-4572-88ac-fe9aab717b5d"). InnerVolumeSpecName "kube-api-access-tzp95". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.165713 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfecba67-1f70-4572-88ac-fe9aab717b5d-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "cfecba67-1f70-4572-88ac-fe9aab717b5d" (UID: "cfecba67-1f70-4572-88ac-fe9aab717b5d"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.249642 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzp95\" (UniqueName: \"kubernetes.io/projected/cfecba67-1f70-4572-88ac-fe9aab717b5d-kube-api-access-tzp95\") on node \"crc\" DevicePath \"\"" Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.249678 4910 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/cfecba67-1f70-4572-88ac-fe9aab717b5d-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.797421 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-nmc9j" event={"ID":"cfecba67-1f70-4572-88ac-fe9aab717b5d","Type":"ContainerDied","Data":"828e168d71713509a23c9eddde0d6c8fa5003c7aefb7929463866d3de74960e0"} Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.797499 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="828e168d71713509a23c9eddde0d6c8fa5003c7aefb7929463866d3de74960e0" Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.797552 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nmc9j" Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.952435 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.952867 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.952936 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.953728 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 23:07:10 crc kubenswrapper[4910]: I0105 23:07:10.953812 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" gracePeriod=600 Jan 05 23:07:11 crc kubenswrapper[4910]: E0105 23:07:11.595973 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" 
podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:07:11 crc kubenswrapper[4910]: I0105 23:07:11.808670 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" exitCode=0 Jan 05 23:07:11 crc kubenswrapper[4910]: I0105 23:07:11.808729 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d"} Jan 05 23:07:11 crc kubenswrapper[4910]: I0105 23:07:11.808779 4910 scope.go:117] "RemoveContainer" containerID="4e7b32b882dd159d8555cffe618c0468a49206f8ed5fc41c354f16ebe332cf04" Jan 05 23:07:11 crc kubenswrapper[4910]: I0105 23:07:11.809466 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:07:11 crc kubenswrapper[4910]: E0105 23:07:11.809738 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:07:25 crc kubenswrapper[4910]: I0105 23:07:25.721757 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:07:25 crc kubenswrapper[4910]: E0105 23:07:25.722835 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:07:40 crc kubenswrapper[4910]: I0105 23:07:40.722572 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:07:40 crc kubenswrapper[4910]: E0105 23:07:40.723903 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:07:43 crc kubenswrapper[4910]: I0105 23:07:43.962149 4910 scope.go:117] "RemoveContainer" containerID="c6d0b7b76eace8e3666c9a94ff4f4bf4ba21a302e0bd644e48b5bd1e7499514c" Jan 05 23:07:55 crc kubenswrapper[4910]: I0105 23:07:55.721157 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:07:55 crc kubenswrapper[4910]: E0105 23:07:55.721840 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:08:06 crc kubenswrapper[4910]: I0105 23:08:06.721718 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:08:06 crc kubenswrapper[4910]: E0105 23:08:06.723460 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:08:19 crc kubenswrapper[4910]: I0105 23:08:19.722164 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:08:19 crc kubenswrapper[4910]: E0105 23:08:19.723691 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:08:33 crc kubenswrapper[4910]: I0105 23:08:33.722329 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:08:33 crc kubenswrapper[4910]: E0105 23:08:33.723990 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:08:48 crc kubenswrapper[4910]: I0105 23:08:48.728642 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:08:48 crc kubenswrapper[4910]: E0105 23:08:48.729974 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:09:00 crc kubenswrapper[4910]: I0105 23:09:00.722350 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:09:00 crc kubenswrapper[4910]: E0105 23:09:00.723450 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" 
podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:09:15 crc kubenswrapper[4910]: I0105 23:09:15.721373 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:09:15 crc kubenswrapper[4910]: E0105 23:09:15.722276 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:09:27 crc kubenswrapper[4910]: I0105 23:09:27.722220 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:09:27 crc kubenswrapper[4910]: E0105 23:09:27.723595 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:09:39 crc kubenswrapper[4910]: I0105 23:09:39.723013 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:09:39 crc kubenswrapper[4910]: E0105 23:09:39.724358 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.676400 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-q64jg"] Jan 05 23:09:50 crc kubenswrapper[4910]: E0105 23:09:50.677763 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfecba67-1f70-4572-88ac-fe9aab717b5d" containerName="storage" Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.677788 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfecba67-1f70-4572-88ac-fe9aab717b5d" containerName="storage" Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.678057 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfecba67-1f70-4572-88ac-fe9aab717b5d" containerName="storage" Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.680050 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.709232 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q64jg"] Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.830057 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc424e4b-2e16-476d-a6ec-88accb6f73eb-utilities\") pod \"redhat-operators-q64jg\" (UID: \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\") " pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.830215 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc424e4b-2e16-476d-a6ec-88accb6f73eb-catalog-content\") pod \"redhat-operators-q64jg\" (UID: \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\") " pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.830492 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wrkz\" (UniqueName: \"kubernetes.io/projected/cc424e4b-2e16-476d-a6ec-88accb6f73eb-kube-api-access-2wrkz\") pod \"redhat-operators-q64jg\" (UID: \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\") " pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.932263 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc424e4b-2e16-476d-a6ec-88accb6f73eb-catalog-content\") pod \"redhat-operators-q64jg\" (UID: \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\") " pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.932348 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wrkz\" (UniqueName: \"kubernetes.io/projected/cc424e4b-2e16-476d-a6ec-88accb6f73eb-kube-api-access-2wrkz\") pod \"redhat-operators-q64jg\" (UID: \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\") " pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.932391 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc424e4b-2e16-476d-a6ec-88accb6f73eb-utilities\") pod \"redhat-operators-q64jg\" (UID: \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\") " pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.933007 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc424e4b-2e16-476d-a6ec-88accb6f73eb-catalog-content\") pod \"redhat-operators-q64jg\" (UID: \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\") " pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.933031 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc424e4b-2e16-476d-a6ec-88accb6f73eb-utilities\") pod \"redhat-operators-q64jg\" (UID: \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\") " pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:09:50 crc kubenswrapper[4910]: I0105 23:09:50.961594 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-2wrkz\" (UniqueName: \"kubernetes.io/projected/cc424e4b-2e16-476d-a6ec-88accb6f73eb-kube-api-access-2wrkz\") pod \"redhat-operators-q64jg\" (UID: \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\") " pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:09:51 crc kubenswrapper[4910]: I0105 23:09:51.053412 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:09:51 crc kubenswrapper[4910]: I0105 23:09:51.338020 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q64jg"] Jan 05 23:09:51 crc kubenswrapper[4910]: I0105 23:09:51.376989 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q64jg" event={"ID":"cc424e4b-2e16-476d-a6ec-88accb6f73eb","Type":"ContainerStarted","Data":"b3a0dc43499257eea07fc50d47589d8f637c52d9eb36fcad7cf60aa48d5882c5"} Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.070372 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-578wd"] Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.072600 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-578wd" Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.093104 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-578wd"] Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.151179 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/699f86cd-0dfb-4bd0-92c6-091490d6c013-utilities\") pod \"certified-operators-578wd\" (UID: \"699f86cd-0dfb-4bd0-92c6-091490d6c013\") " pod="openshift-marketplace/certified-operators-578wd" Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.151264 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/699f86cd-0dfb-4bd0-92c6-091490d6c013-catalog-content\") pod \"certified-operators-578wd\" (UID: \"699f86cd-0dfb-4bd0-92c6-091490d6c013\") " pod="openshift-marketplace/certified-operators-578wd" Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.151740 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwj5s\" (UniqueName: \"kubernetes.io/projected/699f86cd-0dfb-4bd0-92c6-091490d6c013-kube-api-access-xwj5s\") pod \"certified-operators-578wd\" (UID: \"699f86cd-0dfb-4bd0-92c6-091490d6c013\") " pod="openshift-marketplace/certified-operators-578wd" Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.253137 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/699f86cd-0dfb-4bd0-92c6-091490d6c013-catalog-content\") pod \"certified-operators-578wd\" (UID: \"699f86cd-0dfb-4bd0-92c6-091490d6c013\") " pod="openshift-marketplace/certified-operators-578wd" Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.253261 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwj5s\" (UniqueName: \"kubernetes.io/projected/699f86cd-0dfb-4bd0-92c6-091490d6c013-kube-api-access-xwj5s\") pod \"certified-operators-578wd\" (UID: \"699f86cd-0dfb-4bd0-92c6-091490d6c013\") " pod="openshift-marketplace/certified-operators-578wd" Jan 05 23:09:52 crc 
Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.253329 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/699f86cd-0dfb-4bd0-92c6-091490d6c013-utilities\") pod \"certified-operators-578wd\" (UID: \"699f86cd-0dfb-4bd0-92c6-091490d6c013\") " pod="openshift-marketplace/certified-operators-578wd"
Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.254099 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/699f86cd-0dfb-4bd0-92c6-091490d6c013-utilities\") pod \"certified-operators-578wd\" (UID: \"699f86cd-0dfb-4bd0-92c6-091490d6c013\") " pod="openshift-marketplace/certified-operators-578wd"
Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.254085 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/699f86cd-0dfb-4bd0-92c6-091490d6c013-catalog-content\") pod \"certified-operators-578wd\" (UID: \"699f86cd-0dfb-4bd0-92c6-091490d6c013\") " pod="openshift-marketplace/certified-operators-578wd"
Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.279978 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwj5s\" (UniqueName: \"kubernetes.io/projected/699f86cd-0dfb-4bd0-92c6-091490d6c013-kube-api-access-xwj5s\") pod \"certified-operators-578wd\" (UID: \"699f86cd-0dfb-4bd0-92c6-091490d6c013\") " pod="openshift-marketplace/certified-operators-578wd"
Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.385912 4910 generic.go:334] "Generic (PLEG): container finished" podID="cc424e4b-2e16-476d-a6ec-88accb6f73eb" containerID="1d5b8f091dd10c74f966e16bcdad1f0a52253a548563035d528ea6d03a50231f" exitCode=0
Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.385966 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q64jg" event={"ID":"cc424e4b-2e16-476d-a6ec-88accb6f73eb","Type":"ContainerDied","Data":"1d5b8f091dd10c74f966e16bcdad1f0a52253a548563035d528ea6d03a50231f"}
Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.401025 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-578wd"
Jan 05 23:09:52 crc kubenswrapper[4910]: I0105 23:09:52.715374 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-578wd"]
Jan 05 23:09:53 crc kubenswrapper[4910]: I0105 23:09:53.398988 4910 generic.go:334] "Generic (PLEG): container finished" podID="699f86cd-0dfb-4bd0-92c6-091490d6c013" containerID="22669cef80c8204bdf19393d070a3ec41bfb5cb09d11eb205ef4a9fd24430348" exitCode=0
Jan 05 23:09:53 crc kubenswrapper[4910]: I0105 23:09:53.399083 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-578wd" event={"ID":"699f86cd-0dfb-4bd0-92c6-091490d6c013","Type":"ContainerDied","Data":"22669cef80c8204bdf19393d070a3ec41bfb5cb09d11eb205ef4a9fd24430348"}
Jan 05 23:09:53 crc kubenswrapper[4910]: I0105 23:09:53.399751 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-578wd" event={"ID":"699f86cd-0dfb-4bd0-92c6-091490d6c013","Type":"ContainerStarted","Data":"1b0caa9f57d9ba6707dccdf0d495bdf1b2bb6bbe4a5ed8eb66ed1906a419ec0e"}
Jan 05 23:09:53 crc kubenswrapper[4910]: I0105 23:09:53.721779 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d"
Jan 05 23:09:53 crc kubenswrapper[4910]: E0105 23:09:53.722642 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 23:09:54 crc kubenswrapper[4910]: I0105 23:09:54.416981 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q64jg" event={"ID":"cc424e4b-2e16-476d-a6ec-88accb6f73eb","Type":"ContainerStarted","Data":"734f7f7e0d924a0769122abace7d7e010a92711fe9d18c5c5a962a4364f93b08"}
Jan 05 23:09:54 crc kubenswrapper[4910]: I0105 23:09:54.421354 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-578wd" event={"ID":"699f86cd-0dfb-4bd0-92c6-091490d6c013","Type":"ContainerStarted","Data":"796594d1f2b4cce5bd4c03ba274544e26074780383cc35446ac73c5c1436ff22"}
Jan 05 23:09:55 crc kubenswrapper[4910]: I0105 23:09:55.437522 4910 generic.go:334] "Generic (PLEG): container finished" podID="699f86cd-0dfb-4bd0-92c6-091490d6c013" containerID="796594d1f2b4cce5bd4c03ba274544e26074780383cc35446ac73c5c1436ff22" exitCode=0
Jan 05 23:09:55 crc kubenswrapper[4910]: I0105 23:09:55.437670 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-578wd" event={"ID":"699f86cd-0dfb-4bd0-92c6-091490d6c013","Type":"ContainerDied","Data":"796594d1f2b4cce5bd4c03ba274544e26074780383cc35446ac73c5c1436ff22"}
Jan 05 23:09:55 crc kubenswrapper[4910]: I0105 23:09:55.452886 4910 generic.go:334] "Generic (PLEG): container finished" podID="cc424e4b-2e16-476d-a6ec-88accb6f73eb" containerID="734f7f7e0d924a0769122abace7d7e010a92711fe9d18c5c5a962a4364f93b08" exitCode=0
Jan 05 23:09:55 crc kubenswrapper[4910]: I0105 23:09:55.452938 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q64jg" event={"ID":"cc424e4b-2e16-476d-a6ec-88accb6f73eb","Type":"ContainerDied","Data":"734f7f7e0d924a0769122abace7d7e010a92711fe9d18c5c5a962a4364f93b08"}
Jan 05 23:09:56 crc kubenswrapper[4910]: I0105 23:09:56.466216 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-578wd" event={"ID":"699f86cd-0dfb-4bd0-92c6-091490d6c013","Type":"ContainerStarted","Data":"a37a8a21d53859d817d41c5ff6981589a0250792f05663e2de5deabfcd9d9f22"}
Jan 05 23:09:56 crc kubenswrapper[4910]: I0105 23:09:56.470676 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q64jg" event={"ID":"cc424e4b-2e16-476d-a6ec-88accb6f73eb","Type":"ContainerStarted","Data":"4f79f519c300197fa8734e7be97835e7c0c71ab81d0abc1278793abcad51a624"}
Jan 05 23:09:56 crc kubenswrapper[4910]: I0105 23:09:56.503262 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-578wd" podStartSLOduration=2.027807685 podStartE2EDuration="4.503228076s" podCreationTimestamp="2026-01-05 23:09:52 +0000 UTC" firstStartedPulling="2026-01-05 23:09:53.404111723 +0000 UTC m=+4724.981609433" lastFinishedPulling="2026-01-05 23:09:55.879532114 +0000 UTC m=+4727.457029824" observedRunningTime="2026-01-05 23:09:56.493040554 +0000 UTC m=+4728.070538254" watchObservedRunningTime="2026-01-05 23:09:56.503228076 +0000 UTC m=+4728.080725776"
Jan 05 23:09:56 crc kubenswrapper[4910]: I0105 23:09:56.528915 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-q64jg" podStartSLOduration=2.981837836 podStartE2EDuration="6.52888518s" podCreationTimestamp="2026-01-05 23:09:50 +0000 UTC" firstStartedPulling="2026-01-05 23:09:52.387611111 +0000 UTC m=+4723.965108781" lastFinishedPulling="2026-01-05 23:09:55.934658425 +0000 UTC m=+4727.512156125" observedRunningTime="2026-01-05 23:09:56.521083957 +0000 UTC m=+4728.098581657" watchObservedRunningTime="2026-01-05 23:09:56.52888518 +0000 UTC m=+4728.106382880"
Jan 05 23:10:01 crc kubenswrapper[4910]: I0105 23:10:01.054876 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-q64jg"
Jan 05 23:10:01 crc kubenswrapper[4910]: I0105 23:10:01.055397 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-q64jg"
Jan 05 23:10:02 crc kubenswrapper[4910]: I0105 23:10:02.130102 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-q64jg" podUID="cc424e4b-2e16-476d-a6ec-88accb6f73eb" containerName="registry-server" probeResult="failure" output=<
Jan 05 23:10:02 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s
Jan 05 23:10:02 crc kubenswrapper[4910]: >
Jan 05 23:10:02 crc kubenswrapper[4910]: I0105 23:10:02.401322 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-578wd"
Jan 05 23:10:02 crc kubenswrapper[4910]: I0105 23:10:02.401412 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-578wd"
Jan 05 23:10:02 crc kubenswrapper[4910]: I0105 23:10:02.485185 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-578wd"
Jan 05 23:10:02 crc kubenswrapper[4910]: I0105 23:10:02.581107 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-578wd"
pod="openshift-marketplace/certified-operators-578wd" Jan 05 23:10:02 crc kubenswrapper[4910]: I0105 23:10:02.738379 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-578wd"] Jan 05 23:10:04 crc kubenswrapper[4910]: I0105 23:10:04.550422 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-578wd" podUID="699f86cd-0dfb-4bd0-92c6-091490d6c013" containerName="registry-server" containerID="cri-o://a37a8a21d53859d817d41c5ff6981589a0250792f05663e2de5deabfcd9d9f22" gracePeriod=2 Jan 05 23:10:07 crc kubenswrapper[4910]: I0105 23:10:07.590836 4910 generic.go:334] "Generic (PLEG): container finished" podID="699f86cd-0dfb-4bd0-92c6-091490d6c013" containerID="a37a8a21d53859d817d41c5ff6981589a0250792f05663e2de5deabfcd9d9f22" exitCode=0 Jan 05 23:10:07 crc kubenswrapper[4910]: I0105 23:10:07.590923 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-578wd" event={"ID":"699f86cd-0dfb-4bd0-92c6-091490d6c013","Type":"ContainerDied","Data":"a37a8a21d53859d817d41c5ff6981589a0250792f05663e2de5deabfcd9d9f22"} Jan 05 23:10:07 crc kubenswrapper[4910]: I0105 23:10:07.681869 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-578wd" Jan 05 23:10:07 crc kubenswrapper[4910]: I0105 23:10:07.723518 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:10:07 crc kubenswrapper[4910]: E0105 23:10:07.724249 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:10:07 crc kubenswrapper[4910]: I0105 23:10:07.762848 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/699f86cd-0dfb-4bd0-92c6-091490d6c013-utilities\") pod \"699f86cd-0dfb-4bd0-92c6-091490d6c013\" (UID: \"699f86cd-0dfb-4bd0-92c6-091490d6c013\") " Jan 05 23:10:07 crc kubenswrapper[4910]: I0105 23:10:07.762943 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/699f86cd-0dfb-4bd0-92c6-091490d6c013-catalog-content\") pod \"699f86cd-0dfb-4bd0-92c6-091490d6c013\" (UID: \"699f86cd-0dfb-4bd0-92c6-091490d6c013\") " Jan 05 23:10:07 crc kubenswrapper[4910]: I0105 23:10:07.763115 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwj5s\" (UniqueName: \"kubernetes.io/projected/699f86cd-0dfb-4bd0-92c6-091490d6c013-kube-api-access-xwj5s\") pod \"699f86cd-0dfb-4bd0-92c6-091490d6c013\" (UID: \"699f86cd-0dfb-4bd0-92c6-091490d6c013\") " Jan 05 23:10:07 crc kubenswrapper[4910]: I0105 23:10:07.764885 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/699f86cd-0dfb-4bd0-92c6-091490d6c013-utilities" (OuterVolumeSpecName: "utilities") pod "699f86cd-0dfb-4bd0-92c6-091490d6c013" (UID: "699f86cd-0dfb-4bd0-92c6-091490d6c013"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:10:07 crc kubenswrapper[4910]: I0105 23:10:07.773180 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/699f86cd-0dfb-4bd0-92c6-091490d6c013-kube-api-access-xwj5s" (OuterVolumeSpecName: "kube-api-access-xwj5s") pod "699f86cd-0dfb-4bd0-92c6-091490d6c013" (UID: "699f86cd-0dfb-4bd0-92c6-091490d6c013"). InnerVolumeSpecName "kube-api-access-xwj5s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:10:07 crc kubenswrapper[4910]: I0105 23:10:07.831805 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/699f86cd-0dfb-4bd0-92c6-091490d6c013-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "699f86cd-0dfb-4bd0-92c6-091490d6c013" (UID: "699f86cd-0dfb-4bd0-92c6-091490d6c013"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:10:07 crc kubenswrapper[4910]: I0105 23:10:07.867771 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwj5s\" (UniqueName: \"kubernetes.io/projected/699f86cd-0dfb-4bd0-92c6-091490d6c013-kube-api-access-xwj5s\") on node \"crc\" DevicePath \"\"" Jan 05 23:10:07 crc kubenswrapper[4910]: I0105 23:10:07.867834 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/699f86cd-0dfb-4bd0-92c6-091490d6c013-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:10:07 crc kubenswrapper[4910]: I0105 23:10:07.867853 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/699f86cd-0dfb-4bd0-92c6-091490d6c013-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:10:08 crc kubenswrapper[4910]: I0105 23:10:08.604304 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-578wd" event={"ID":"699f86cd-0dfb-4bd0-92c6-091490d6c013","Type":"ContainerDied","Data":"1b0caa9f57d9ba6707dccdf0d495bdf1b2bb6bbe4a5ed8eb66ed1906a419ec0e"} Jan 05 23:10:08 crc kubenswrapper[4910]: I0105 23:10:08.604394 4910 scope.go:117] "RemoveContainer" containerID="a37a8a21d53859d817d41c5ff6981589a0250792f05663e2de5deabfcd9d9f22" Jan 05 23:10:08 crc kubenswrapper[4910]: I0105 23:10:08.604397 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-578wd" Jan 05 23:10:08 crc kubenswrapper[4910]: I0105 23:10:08.637335 4910 scope.go:117] "RemoveContainer" containerID="796594d1f2b4cce5bd4c03ba274544e26074780383cc35446ac73c5c1436ff22" Jan 05 23:10:08 crc kubenswrapper[4910]: I0105 23:10:08.668752 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-578wd"] Jan 05 23:10:08 crc kubenswrapper[4910]: I0105 23:10:08.682205 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-578wd"] Jan 05 23:10:08 crc kubenswrapper[4910]: I0105 23:10:08.687316 4910 scope.go:117] "RemoveContainer" containerID="22669cef80c8204bdf19393d070a3ec41bfb5cb09d11eb205ef4a9fd24430348" Jan 05 23:10:08 crc kubenswrapper[4910]: I0105 23:10:08.737982 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="699f86cd-0dfb-4bd0-92c6-091490d6c013" path="/var/lib/kubelet/pods/699f86cd-0dfb-4bd0-92c6-091490d6c013/volumes" Jan 05 23:10:11 crc kubenswrapper[4910]: I0105 23:10:11.114961 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:10:11 crc kubenswrapper[4910]: I0105 23:10:11.190616 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:10:11 crc kubenswrapper[4910]: I0105 23:10:11.367874 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q64jg"] Jan 05 23:10:12 crc kubenswrapper[4910]: I0105 23:10:12.650203 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-q64jg" podUID="cc424e4b-2e16-476d-a6ec-88accb6f73eb" containerName="registry-server" containerID="cri-o://4f79f519c300197fa8734e7be97835e7c0c71ab81d0abc1278793abcad51a624" gracePeriod=2 Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.165018 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.267147 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc424e4b-2e16-476d-a6ec-88accb6f73eb-utilities\") pod \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\" (UID: \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\") " Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.267327 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc424e4b-2e16-476d-a6ec-88accb6f73eb-catalog-content\") pod \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\" (UID: \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\") " Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.267369 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wrkz\" (UniqueName: \"kubernetes.io/projected/cc424e4b-2e16-476d-a6ec-88accb6f73eb-kube-api-access-2wrkz\") pod \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\" (UID: \"cc424e4b-2e16-476d-a6ec-88accb6f73eb\") " Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.269665 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc424e4b-2e16-476d-a6ec-88accb6f73eb-utilities" (OuterVolumeSpecName: "utilities") pod "cc424e4b-2e16-476d-a6ec-88accb6f73eb" (UID: "cc424e4b-2e16-476d-a6ec-88accb6f73eb"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.278274 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc424e4b-2e16-476d-a6ec-88accb6f73eb-kube-api-access-2wrkz" (OuterVolumeSpecName: "kube-api-access-2wrkz") pod "cc424e4b-2e16-476d-a6ec-88accb6f73eb" (UID: "cc424e4b-2e16-476d-a6ec-88accb6f73eb"). InnerVolumeSpecName "kube-api-access-2wrkz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.369601 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc424e4b-2e16-476d-a6ec-88accb6f73eb-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.369669 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wrkz\" (UniqueName: \"kubernetes.io/projected/cc424e4b-2e16-476d-a6ec-88accb6f73eb-kube-api-access-2wrkz\") on node \"crc\" DevicePath \"\"" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.455101 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc424e4b-2e16-476d-a6ec-88accb6f73eb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cc424e4b-2e16-476d-a6ec-88accb6f73eb" (UID: "cc424e4b-2e16-476d-a6ec-88accb6f73eb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.471270 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc424e4b-2e16-476d-a6ec-88accb6f73eb-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.666163 4910 generic.go:334] "Generic (PLEG): container finished" podID="cc424e4b-2e16-476d-a6ec-88accb6f73eb" containerID="4f79f519c300197fa8734e7be97835e7c0c71ab81d0abc1278793abcad51a624" exitCode=0 Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.666232 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q64jg" event={"ID":"cc424e4b-2e16-476d-a6ec-88accb6f73eb","Type":"ContainerDied","Data":"4f79f519c300197fa8734e7be97835e7c0c71ab81d0abc1278793abcad51a624"} Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.666286 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q64jg" event={"ID":"cc424e4b-2e16-476d-a6ec-88accb6f73eb","Type":"ContainerDied","Data":"b3a0dc43499257eea07fc50d47589d8f637c52d9eb36fcad7cf60aa48d5882c5"} Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.666301 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q64jg" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.666316 4910 scope.go:117] "RemoveContainer" containerID="4f79f519c300197fa8734e7be97835e7c0c71ab81d0abc1278793abcad51a624" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.710715 4910 scope.go:117] "RemoveContainer" containerID="734f7f7e0d924a0769122abace7d7e010a92711fe9d18c5c5a962a4364f93b08" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.724583 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q64jg"] Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.740208 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-q64jg"] Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.759157 4910 scope.go:117] "RemoveContainer" containerID="1d5b8f091dd10c74f966e16bcdad1f0a52253a548563035d528ea6d03a50231f" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.795420 4910 scope.go:117] "RemoveContainer" containerID="4f79f519c300197fa8734e7be97835e7c0c71ab81d0abc1278793abcad51a624" Jan 05 23:10:13 crc kubenswrapper[4910]: E0105 23:10:13.796393 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f79f519c300197fa8734e7be97835e7c0c71ab81d0abc1278793abcad51a624\": container with ID starting with 4f79f519c300197fa8734e7be97835e7c0c71ab81d0abc1278793abcad51a624 not found: ID does not exist" containerID="4f79f519c300197fa8734e7be97835e7c0c71ab81d0abc1278793abcad51a624" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.796458 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f79f519c300197fa8734e7be97835e7c0c71ab81d0abc1278793abcad51a624"} err="failed to get container status \"4f79f519c300197fa8734e7be97835e7c0c71ab81d0abc1278793abcad51a624\": rpc error: code = NotFound desc = could not find container \"4f79f519c300197fa8734e7be97835e7c0c71ab81d0abc1278793abcad51a624\": container with ID starting with 4f79f519c300197fa8734e7be97835e7c0c71ab81d0abc1278793abcad51a624 not found: ID does not exist" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.796502 4910 scope.go:117] "RemoveContainer" containerID="734f7f7e0d924a0769122abace7d7e010a92711fe9d18c5c5a962a4364f93b08" Jan 05 23:10:13 crc kubenswrapper[4910]: E0105 23:10:13.797293 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"734f7f7e0d924a0769122abace7d7e010a92711fe9d18c5c5a962a4364f93b08\": container with ID starting with 734f7f7e0d924a0769122abace7d7e010a92711fe9d18c5c5a962a4364f93b08 not found: ID does not exist" containerID="734f7f7e0d924a0769122abace7d7e010a92711fe9d18c5c5a962a4364f93b08" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.797547 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"734f7f7e0d924a0769122abace7d7e010a92711fe9d18c5c5a962a4364f93b08"} err="failed to get container status \"734f7f7e0d924a0769122abace7d7e010a92711fe9d18c5c5a962a4364f93b08\": rpc error: code = NotFound desc = could not find container \"734f7f7e0d924a0769122abace7d7e010a92711fe9d18c5c5a962a4364f93b08\": container with ID starting with 734f7f7e0d924a0769122abace7d7e010a92711fe9d18c5c5a962a4364f93b08 not found: ID does not exist" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.797729 4910 scope.go:117] "RemoveContainer" 
containerID="1d5b8f091dd10c74f966e16bcdad1f0a52253a548563035d528ea6d03a50231f" Jan 05 23:10:13 crc kubenswrapper[4910]: E0105 23:10:13.798675 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d5b8f091dd10c74f966e16bcdad1f0a52253a548563035d528ea6d03a50231f\": container with ID starting with 1d5b8f091dd10c74f966e16bcdad1f0a52253a548563035d528ea6d03a50231f not found: ID does not exist" containerID="1d5b8f091dd10c74f966e16bcdad1f0a52253a548563035d528ea6d03a50231f" Jan 05 23:10:13 crc kubenswrapper[4910]: I0105 23:10:13.798854 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d5b8f091dd10c74f966e16bcdad1f0a52253a548563035d528ea6d03a50231f"} err="failed to get container status \"1d5b8f091dd10c74f966e16bcdad1f0a52253a548563035d528ea6d03a50231f\": rpc error: code = NotFound desc = could not find container \"1d5b8f091dd10c74f966e16bcdad1f0a52253a548563035d528ea6d03a50231f\": container with ID starting with 1d5b8f091dd10c74f966e16bcdad1f0a52253a548563035d528ea6d03a50231f not found: ID does not exist" Jan 05 23:10:14 crc kubenswrapper[4910]: I0105 23:10:14.738063 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc424e4b-2e16-476d-a6ec-88accb6f73eb" path="/var/lib/kubelet/pods/cc424e4b-2e16-476d-a6ec-88accb6f73eb/volumes" Jan 05 23:10:18 crc kubenswrapper[4910]: I0105 23:10:18.727270 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:10:18 crc kubenswrapper[4910]: E0105 23:10:18.727885 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.476331 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-95587bc99-dfhv7"] Jan 05 23:10:21 crc kubenswrapper[4910]: E0105 23:10:21.477000 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc424e4b-2e16-476d-a6ec-88accb6f73eb" containerName="extract-content" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.477013 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc424e4b-2e16-476d-a6ec-88accb6f73eb" containerName="extract-content" Jan 05 23:10:21 crc kubenswrapper[4910]: E0105 23:10:21.477033 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc424e4b-2e16-476d-a6ec-88accb6f73eb" containerName="registry-server" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.477047 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc424e4b-2e16-476d-a6ec-88accb6f73eb" containerName="registry-server" Jan 05 23:10:21 crc kubenswrapper[4910]: E0105 23:10:21.477057 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="699f86cd-0dfb-4bd0-92c6-091490d6c013" containerName="extract-content" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.477063 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="699f86cd-0dfb-4bd0-92c6-091490d6c013" containerName="extract-content" Jan 05 23:10:21 crc kubenswrapper[4910]: E0105 23:10:21.477076 4910 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="699f86cd-0dfb-4bd0-92c6-091490d6c013" containerName="extract-utilities" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.477082 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="699f86cd-0dfb-4bd0-92c6-091490d6c013" containerName="extract-utilities" Jan 05 23:10:21 crc kubenswrapper[4910]: E0105 23:10:21.477094 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="699f86cd-0dfb-4bd0-92c6-091490d6c013" containerName="registry-server" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.477099 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="699f86cd-0dfb-4bd0-92c6-091490d6c013" containerName="registry-server" Jan 05 23:10:21 crc kubenswrapper[4910]: E0105 23:10:21.477114 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc424e4b-2e16-476d-a6ec-88accb6f73eb" containerName="extract-utilities" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.477134 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc424e4b-2e16-476d-a6ec-88accb6f73eb" containerName="extract-utilities" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.477264 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="699f86cd-0dfb-4bd0-92c6-091490d6c013" containerName="registry-server" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.477276 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc424e4b-2e16-476d-a6ec-88accb6f73eb" containerName="registry-server" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.478007 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.480071 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.480312 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.480541 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.485584 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.485806 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-vjdvn" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.501905 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-dfhv7"] Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.618717 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7f771bf-44ed-4048-a0a2-891d2fb72d74-dns-svc\") pod \"dnsmasq-dns-95587bc99-dfhv7\" (UID: \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\") " pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.618876 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qtpw\" (UniqueName: \"kubernetes.io/projected/d7f771bf-44ed-4048-a0a2-891d2fb72d74-kube-api-access-8qtpw\") pod \"dnsmasq-dns-95587bc99-dfhv7\" (UID: \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\") " pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.618933 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7f771bf-44ed-4048-a0a2-891d2fb72d74-config\") pod \"dnsmasq-dns-95587bc99-dfhv7\" (UID: \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\") " pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.720652 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qtpw\" (UniqueName: \"kubernetes.io/projected/d7f771bf-44ed-4048-a0a2-891d2fb72d74-kube-api-access-8qtpw\") pod \"dnsmasq-dns-95587bc99-dfhv7\" (UID: \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\") " pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.720715 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7f771bf-44ed-4048-a0a2-891d2fb72d74-config\") pod \"dnsmasq-dns-95587bc99-dfhv7\" (UID: \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\") " pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.720771 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7f771bf-44ed-4048-a0a2-891d2fb72d74-dns-svc\") pod \"dnsmasq-dns-95587bc99-dfhv7\" (UID: \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\") " pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.721660 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7f771bf-44ed-4048-a0a2-891d2fb72d74-dns-svc\") pod \"dnsmasq-dns-95587bc99-dfhv7\" (UID: \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\") " pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.721842 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7f771bf-44ed-4048-a0a2-891d2fb72d74-config\") pod \"dnsmasq-dns-95587bc99-dfhv7\" (UID: \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\") " pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.764086 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qtpw\" (UniqueName: \"kubernetes.io/projected/d7f771bf-44ed-4048-a0a2-891d2fb72d74-kube-api-access-8qtpw\") pod \"dnsmasq-dns-95587bc99-dfhv7\" (UID: \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\") " pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.797171 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.835894 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-s7pxp"] Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.837473 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.850313 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-s7pxp"] Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.926865 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79464\" (UniqueName: \"kubernetes.io/projected/94042c36-e8dd-4a95-9288-e2b3c14d16ed-kube-api-access-79464\") pod \"dnsmasq-dns-5d79f765b5-s7pxp\" (UID: \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\") " pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.927280 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94042c36-e8dd-4a95-9288-e2b3c14d16ed-config\") pod \"dnsmasq-dns-5d79f765b5-s7pxp\" (UID: \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\") " pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:10:21 crc kubenswrapper[4910]: I0105 23:10:21.927359 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94042c36-e8dd-4a95-9288-e2b3c14d16ed-dns-svc\") pod \"dnsmasq-dns-5d79f765b5-s7pxp\" (UID: \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\") " pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.028947 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94042c36-e8dd-4a95-9288-e2b3c14d16ed-config\") pod \"dnsmasq-dns-5d79f765b5-s7pxp\" (UID: \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\") " pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.029002 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94042c36-e8dd-4a95-9288-e2b3c14d16ed-dns-svc\") pod \"dnsmasq-dns-5d79f765b5-s7pxp\" (UID: \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\") " pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.029097 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79464\" (UniqueName: \"kubernetes.io/projected/94042c36-e8dd-4a95-9288-e2b3c14d16ed-kube-api-access-79464\") pod \"dnsmasq-dns-5d79f765b5-s7pxp\" (UID: \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\") " pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.030213 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94042c36-e8dd-4a95-9288-e2b3c14d16ed-config\") pod \"dnsmasq-dns-5d79f765b5-s7pxp\" (UID: \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\") " pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.030461 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94042c36-e8dd-4a95-9288-e2b3c14d16ed-dns-svc\") pod \"dnsmasq-dns-5d79f765b5-s7pxp\" (UID: \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\") " pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.069434 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79464\" (UniqueName: 
\"kubernetes.io/projected/94042c36-e8dd-4a95-9288-e2b3c14d16ed-kube-api-access-79464\") pod \"dnsmasq-dns-5d79f765b5-s7pxp\" (UID: \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\") " pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.186702 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.299854 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-dfhv7"] Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.542630 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-s7pxp"] Jan 05 23:10:22 crc kubenswrapper[4910]: W0105 23:10:22.548615 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod94042c36_e8dd_4a95_9288_e2b3c14d16ed.slice/crio-d1a3791c1193f145fce1a1ef7e66512776e479e4c51a210ea9b8c1c35460d37d WatchSource:0}: Error finding container d1a3791c1193f145fce1a1ef7e66512776e479e4c51a210ea9b8c1c35460d37d: Status 404 returned error can't find the container with id d1a3791c1193f145fce1a1ef7e66512776e479e4c51a210ea9b8c1c35460d37d Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.665316 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.670341 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.672741 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.673330 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.673553 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.673833 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-p2qqm" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.674021 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.684955 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.742181 4910 generic.go:334] "Generic (PLEG): container finished" podID="d7f771bf-44ed-4048-a0a2-891d2fb72d74" containerID="6c803fdc5db2bc04b32a0565d7e6f7fcc1c7978e1103c00fa3d868e194499047" exitCode=0 Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.742264 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" event={"ID":"d7f771bf-44ed-4048-a0a2-891d2fb72d74","Type":"ContainerDied","Data":"6c803fdc5db2bc04b32a0565d7e6f7fcc1c7978e1103c00fa3d868e194499047"} Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.742317 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" event={"ID":"d7f771bf-44ed-4048-a0a2-891d2fb72d74","Type":"ContainerStarted","Data":"3e83e96f6bf36f391123c590d1248d8bd9f30f3fd0cdeab7df7ad010639a79dd"} Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.746475 4910 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" event={"ID":"94042c36-e8dd-4a95-9288-e2b3c14d16ed","Type":"ContainerStarted","Data":"d1a3791c1193f145fce1a1ef7e66512776e479e4c51a210ea9b8c1c35460d37d"} Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.753340 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.753560 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fd8c27e8-d4f8-4b4a-b588-36235407cb65-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.753933 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.754057 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fd8c27e8-d4f8-4b4a-b588-36235407cb65-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.754176 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fd8c27e8-d4f8-4b4a-b588-36235407cb65-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.754325 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.754448 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.754577 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fd8c27e8-d4f8-4b4a-b588-36235407cb65-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.754679 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-4ch22\" (UniqueName: \"kubernetes.io/projected/fd8c27e8-d4f8-4b4a-b588-36235407cb65-kube-api-access-4ch22\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.856463 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fd8c27e8-d4f8-4b4a-b588-36235407cb65-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.856537 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.856572 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fd8c27e8-d4f8-4b4a-b588-36235407cb65-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.856595 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fd8c27e8-d4f8-4b4a-b588-36235407cb65-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.856661 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.856705 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.856757 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fd8c27e8-d4f8-4b4a-b588-36235407cb65-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.856792 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ch22\" (UniqueName: \"kubernetes.io/projected/fd8c27e8-d4f8-4b4a-b588-36235407cb65-kube-api-access-4ch22\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.856826 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: 
\"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.858547 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.858899 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.859534 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fd8c27e8-d4f8-4b4a-b588-36235407cb65-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.860338 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fd8c27e8-d4f8-4b4a-b588-36235407cb65-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.861286 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.861321 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/aa87ac3f633e281ec836a753a99a3c6b975434127cedb14ddd1639acff6aaee3/globalmount\"" pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.862704 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fd8c27e8-d4f8-4b4a-b588-36235407cb65-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.863223 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fd8c27e8-d4f8-4b4a-b588-36235407cb65-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.877096 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ch22\" (UniqueName: \"kubernetes.io/projected/fd8c27e8-d4f8-4b4a-b588-36235407cb65-kube-api-access-4ch22\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.882858 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" 
(UniqueName: \"kubernetes.io/projected/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:22 crc kubenswrapper[4910]: I0105 23:10:22.926686 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\") pod \"rabbitmq-server-0\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " pod="openstack/rabbitmq-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.017851 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: E0105 23:10:23.019478 4910 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Jan 05 23:10:23 crc kubenswrapper[4910]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/d7f771bf-44ed-4048-a0a2-891d2fb72d74/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 05 23:10:23 crc kubenswrapper[4910]: > podSandboxID="3e83e96f6bf36f391123c590d1248d8bd9f30f3fd0cdeab7df7ad010639a79dd" Jan 05 23:10:23 crc kubenswrapper[4910]: E0105 23:10:23.019698 4910 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 05 23:10:23 crc kubenswrapper[4910]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n8chc6h5bh56fh546hb7hc8h67h5bchffh577h697h5b5h5bdh59bhf6hf4h558hb5h578h595h5cchfbh644h59ch7fh654h547h587h5cbh5d5h8fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8qtpw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 
},Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-95587bc99-dfhv7_openstack(d7f771bf-44ed-4048-a0a2-891d2fb72d74): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/d7f771bf-44ed-4048-a0a2-891d2fb72d74/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 05 23:10:23 crc kubenswrapper[4910]: > logger="UnhandledError" Jan 05 23:10:23 crc kubenswrapper[4910]: E0105 23:10:23.020905 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/d7f771bf-44ed-4048-a0a2-891d2fb72d74/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" podUID="d7f771bf-44ed-4048-a0a2-891d2fb72d74" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.024665 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.026377 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.028687 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.028860 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.030268 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.030303 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-swqv9" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.030447 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.042356 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.169800 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.170425 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.170553 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjdhf\" (UniqueName: \"kubernetes.io/projected/b4a03100-4353-4d31-815b-2ad6b4286473-kube-api-access-bjdhf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.170871 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.171032 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b4a03100-4353-4d31-815b-2ad6b4286473-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.171250 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b4a03100-4353-4d31-815b-2ad6b4286473-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 
23:10:23.171375 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.171480 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b4a03100-4353-4d31-815b-2ad6b4286473-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.171598 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b4a03100-4353-4d31-815b-2ad6b4286473-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.273366 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.273428 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.273450 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjdhf\" (UniqueName: \"kubernetes.io/projected/b4a03100-4353-4d31-815b-2ad6b4286473-kube-api-access-bjdhf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.273510 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.273542 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b4a03100-4353-4d31-815b-2ad6b4286473-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.273561 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b4a03100-4353-4d31-815b-2ad6b4286473-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.273584 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.273609 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b4a03100-4353-4d31-815b-2ad6b4286473-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.273631 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b4a03100-4353-4d31-815b-2ad6b4286473-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.275459 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.275553 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.275948 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b4a03100-4353-4d31-815b-2ad6b4286473-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.276481 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b4a03100-4353-4d31-815b-2ad6b4286473-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.282171 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b4a03100-4353-4d31-815b-2ad6b4286473-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.283802 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.284357 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.285161 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.284860 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b4a03100-4353-4d31-815b-2ad6b4286473-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.285107 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1e4b04c69fd3a5251ce0da75b7a5ef8d4c0347e55c8b081209752616552e3bbf/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.296434 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjdhf\" (UniqueName: \"kubernetes.io/projected/b4a03100-4353-4d31-815b-2ad6b4286473-kube-api-access-bjdhf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.325372 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\") pod \"rabbitmq-cell1-server-0\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.384235 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.760334 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fd8c27e8-d4f8-4b4a-b588-36235407cb65","Type":"ContainerStarted","Data":"1979c02a32ff231970eb4edeed22d749a41dbdcfb8664ce89604e8e3560bb0b1"} Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.765972 4910 generic.go:334] "Generic (PLEG): container finished" podID="94042c36-e8dd-4a95-9288-e2b3c14d16ed" containerID="4f14487b5b9356e354dc3fd424cb3bd2c60c03cc37ea66bf815b4a1fc000b3fe" exitCode=0 Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.766087 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" event={"ID":"94042c36-e8dd-4a95-9288-e2b3c14d16ed","Type":"ContainerDied","Data":"4f14487b5b9356e354dc3fd424cb3bd2c60c03cc37ea66bf815b4a1fc000b3fe"} Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.907976 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 05 23:10:23 crc kubenswrapper[4910]: W0105 23:10:23.912480 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb4a03100_4353_4d31_815b_2ad6b4286473.slice/crio-73b8fa22e0bf3e1a0cbcf0f619cfafb438bd69add552483dd8d3ac05f537c5df WatchSource:0}: Error finding container 73b8fa22e0bf3e1a0cbcf0f619cfafb438bd69add552483dd8d3ac05f537c5df: Status 404 returned error can't find the container with id 73b8fa22e0bf3e1a0cbcf0f619cfafb438bd69add552483dd8d3ac05f537c5df Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.941407 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.942943 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.949162 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-zb64q" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.951848 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.952342 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.952879 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.959160 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.961571 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.998293 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnq52\" (UniqueName: \"kubernetes.io/projected/d18ec054-49b0-49da-bf27-16a8ac236b5d-kube-api-access-lnq52\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.998770 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d18ec054-49b0-49da-bf27-16a8ac236b5d-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.998815 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d18ec054-49b0-49da-bf27-16a8ac236b5d-config-data-generated\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.998874 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d18ec054-49b0-49da-bf27-16a8ac236b5d-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.998955 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d18ec054-49b0-49da-bf27-16a8ac236b5d-operator-scripts\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.999024 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-19a0e75b-4f73-4c46-811e-d2b29971bf9b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-19a0e75b-4f73-4c46-811e-d2b29971bf9b\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.999065 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d18ec054-49b0-49da-bf27-16a8ac236b5d-kolla-config\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:23 crc kubenswrapper[4910]: I0105 23:10:23.999205 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d18ec054-49b0-49da-bf27-16a8ac236b5d-config-data-default\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.101490 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d18ec054-49b0-49da-bf27-16a8ac236b5d-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.101557 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d18ec054-49b0-49da-bf27-16a8ac236b5d-operator-scripts\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.101614 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-19a0e75b-4f73-4c46-811e-d2b29971bf9b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-19a0e75b-4f73-4c46-811e-d2b29971bf9b\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.101657 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d18ec054-49b0-49da-bf27-16a8ac236b5d-kolla-config\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.101710 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d18ec054-49b0-49da-bf27-16a8ac236b5d-config-data-default\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.101799 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnq52\" (UniqueName: \"kubernetes.io/projected/d18ec054-49b0-49da-bf27-16a8ac236b5d-kube-api-access-lnq52\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.101837 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d18ec054-49b0-49da-bf27-16a8ac236b5d-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.101869 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: 
\"kubernetes.io/empty-dir/d18ec054-49b0-49da-bf27-16a8ac236b5d-config-data-generated\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.103393 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d18ec054-49b0-49da-bf27-16a8ac236b5d-config-data-generated\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.103837 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d18ec054-49b0-49da-bf27-16a8ac236b5d-kolla-config\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.105297 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d18ec054-49b0-49da-bf27-16a8ac236b5d-operator-scripts\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.106594 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d18ec054-49b0-49da-bf27-16a8ac236b5d-config-data-default\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.109881 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d18ec054-49b0-49da-bf27-16a8ac236b5d-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.110327 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d18ec054-49b0-49da-bf27-16a8ac236b5d-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.112678 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.112844 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-19a0e75b-4f73-4c46-811e-d2b29971bf9b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-19a0e75b-4f73-4c46-811e-d2b29971bf9b\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/31f7cdceae88c99c8eb7f92c63e8d6be79ec4a39af58d7aba6bba28911e447bc/globalmount\"" pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.126837 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnq52\" (UniqueName: \"kubernetes.io/projected/d18ec054-49b0-49da-bf27-16a8ac236b5d-kube-api-access-lnq52\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.363765 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.364763 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.367295 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-75zvp" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.368207 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.383733 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.388674 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-19a0e75b-4f73-4c46-811e-d2b29971bf9b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-19a0e75b-4f73-4c46-811e-d2b29971bf9b\") pod \"openstack-galera-0\" (UID: \"d18ec054-49b0-49da-bf27-16a8ac236b5d\") " pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.407506 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7ab1341d-7501-4be3-aa43-e03cb032084e-kolla-config\") pod \"memcached-0\" (UID: \"7ab1341d-7501-4be3-aa43-e03cb032084e\") " pod="openstack/memcached-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.407608 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgbnx\" (UniqueName: \"kubernetes.io/projected/7ab1341d-7501-4be3-aa43-e03cb032084e-kube-api-access-mgbnx\") pod \"memcached-0\" (UID: \"7ab1341d-7501-4be3-aa43-e03cb032084e\") " pod="openstack/memcached-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.407676 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7ab1341d-7501-4be3-aa43-e03cb032084e-config-data\") pod \"memcached-0\" (UID: \"7ab1341d-7501-4be3-aa43-e03cb032084e\") " pod="openstack/memcached-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.509481 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7ab1341d-7501-4be3-aa43-e03cb032084e-config-data\") pod 
\"memcached-0\" (UID: \"7ab1341d-7501-4be3-aa43-e03cb032084e\") " pod="openstack/memcached-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.509574 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7ab1341d-7501-4be3-aa43-e03cb032084e-kolla-config\") pod \"memcached-0\" (UID: \"7ab1341d-7501-4be3-aa43-e03cb032084e\") " pod="openstack/memcached-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.509638 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgbnx\" (UniqueName: \"kubernetes.io/projected/7ab1341d-7501-4be3-aa43-e03cb032084e-kube-api-access-mgbnx\") pod \"memcached-0\" (UID: \"7ab1341d-7501-4be3-aa43-e03cb032084e\") " pod="openstack/memcached-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.510481 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7ab1341d-7501-4be3-aa43-e03cb032084e-config-data\") pod \"memcached-0\" (UID: \"7ab1341d-7501-4be3-aa43-e03cb032084e\") " pod="openstack/memcached-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.510722 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7ab1341d-7501-4be3-aa43-e03cb032084e-kolla-config\") pod \"memcached-0\" (UID: \"7ab1341d-7501-4be3-aa43-e03cb032084e\") " pod="openstack/memcached-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.578637 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.680424 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgbnx\" (UniqueName: \"kubernetes.io/projected/7ab1341d-7501-4be3-aa43-e03cb032084e-kube-api-access-mgbnx\") pod \"memcached-0\" (UID: \"7ab1341d-7501-4be3-aa43-e03cb032084e\") " pod="openstack/memcached-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.690294 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.778642 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" event={"ID":"d7f771bf-44ed-4048-a0a2-891d2fb72d74","Type":"ContainerStarted","Data":"facb3530055f27725b83c80a371e9a2f4d949ff30ff7223c3d0bd130d680c3d3"} Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.779109 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.782205 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" event={"ID":"94042c36-e8dd-4a95-9288-e2b3c14d16ed","Type":"ContainerStarted","Data":"4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2"} Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.782618 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.784612 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b4a03100-4353-4d31-815b-2ad6b4286473","Type":"ContainerStarted","Data":"73b8fa22e0bf3e1a0cbcf0f619cfafb438bd69add552483dd8d3ac05f537c5df"} Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.821715 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" podStartSLOduration=3.821695691 podStartE2EDuration="3.821695691s" podCreationTimestamp="2026-01-05 23:10:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:10:24.814347019 +0000 UTC m=+4756.391844689" watchObservedRunningTime="2026-01-05 23:10:24.821695691 +0000 UTC m=+4756.399193361" Jan 05 23:10:24 crc kubenswrapper[4910]: I0105 23:10:24.846535 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" podStartSLOduration=3.846511593 podStartE2EDuration="3.846511593s" podCreationTimestamp="2026-01-05 23:10:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:10:24.841832208 +0000 UTC m=+4756.419329878" watchObservedRunningTime="2026-01-05 23:10:24.846511593 +0000 UTC m=+4756.424009263" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.162702 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.248069 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.550090 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.551529 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.554985 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.556226 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-4t8tr" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.557461 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.558954 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.571577 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.735704 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/898b9f52-035a-4f23-8362-10bbd49da54e-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.736293 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/898b9f52-035a-4f23-8362-10bbd49da54e-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.736347 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/898b9f52-035a-4f23-8362-10bbd49da54e-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.736413 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/898b9f52-035a-4f23-8362-10bbd49da54e-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.736722 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/898b9f52-035a-4f23-8362-10bbd49da54e-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.736827 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2l87s\" (UniqueName: \"kubernetes.io/projected/898b9f52-035a-4f23-8362-10bbd49da54e-kube-api-access-2l87s\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.736915 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"pvc-41b21a36-fe12-432b-91a5-55f592b837e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41b21a36-fe12-432b-91a5-55f592b837e1\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.736978 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/898b9f52-035a-4f23-8362-10bbd49da54e-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.797481 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fd8c27e8-d4f8-4b4a-b588-36235407cb65","Type":"ContainerStarted","Data":"35d5798419d61ba08fe86e8ff05f292512a662c7db7b41e6936875df72a721b6"} Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.800540 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"d18ec054-49b0-49da-bf27-16a8ac236b5d","Type":"ContainerStarted","Data":"31e976a908b4393e92bebb1f920527033f80549dda30da007a8f7de656c8ab00"} Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.800570 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"d18ec054-49b0-49da-bf27-16a8ac236b5d","Type":"ContainerStarted","Data":"8b9384ac368ef46137b60e71ebaa1a792e582bf3d3985c6e373e3367930e089e"} Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.802511 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"7ab1341d-7501-4be3-aa43-e03cb032084e","Type":"ContainerStarted","Data":"f947dd86aa24d3b1c557a181febca66235b404eef59217f03c431d1007ff6233"} Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.802566 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"7ab1341d-7501-4be3-aa43-e03cb032084e","Type":"ContainerStarted","Data":"e62a65b96ccdb0428bc3f505bac58c4c61b19e717996c75304a589d339f46c00"} Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.803000 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.806937 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b4a03100-4353-4d31-815b-2ad6b4286473","Type":"ContainerStarted","Data":"801ecf70653461d6fb46b27058a2887a58241bacb58255ab7804988977d080cf"} Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.838090 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/898b9f52-035a-4f23-8362-10bbd49da54e-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.838188 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2l87s\" (UniqueName: \"kubernetes.io/projected/898b9f52-035a-4f23-8362-10bbd49da54e-kube-api-access-2l87s\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.838224 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pvc-41b21a36-fe12-432b-91a5-55f592b837e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41b21a36-fe12-432b-91a5-55f592b837e1\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.838249 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/898b9f52-035a-4f23-8362-10bbd49da54e-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.838287 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/898b9f52-035a-4f23-8362-10bbd49da54e-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.838353 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/898b9f52-035a-4f23-8362-10bbd49da54e-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.838373 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/898b9f52-035a-4f23-8362-10bbd49da54e-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.838403 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/898b9f52-035a-4f23-8362-10bbd49da54e-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.839157 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/898b9f52-035a-4f23-8362-10bbd49da54e-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.839333 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/898b9f52-035a-4f23-8362-10bbd49da54e-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.840018 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/898b9f52-035a-4f23-8362-10bbd49da54e-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.840634 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/898b9f52-035a-4f23-8362-10bbd49da54e-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.843251 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.843308 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-41b21a36-fe12-432b-91a5-55f592b837e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41b21a36-fe12-432b-91a5-55f592b837e1\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/beb3cf7e02edd6a14ed8c9d9e84bfbd6b746d7209a3456fd84de373c6ea3ff25/globalmount\"" pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.844904 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/898b9f52-035a-4f23-8362-10bbd49da54e-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.857945 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2l87s\" (UniqueName: \"kubernetes.io/projected/898b9f52-035a-4f23-8362-10bbd49da54e-kube-api-access-2l87s\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.858946 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/898b9f52-035a-4f23-8362-10bbd49da54e-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.896863 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=1.896834291 podStartE2EDuration="1.896834291s" podCreationTimestamp="2026-01-05 23:10:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:10:25.893428167 +0000 UTC m=+4757.470925837" watchObservedRunningTime="2026-01-05 23:10:25.896834291 +0000 UTC m=+4757.474331961" Jan 05 23:10:25 crc kubenswrapper[4910]: I0105 23:10:25.903453 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-41b21a36-fe12-432b-91a5-55f592b837e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41b21a36-fe12-432b-91a5-55f592b837e1\") pod \"openstack-cell1-galera-0\" (UID: \"898b9f52-035a-4f23-8362-10bbd49da54e\") " pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:26 crc kubenswrapper[4910]: I0105 23:10:26.175472 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:26 crc kubenswrapper[4910]: I0105 23:10:26.461360 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 05 23:10:26 crc kubenswrapper[4910]: W0105 23:10:26.986197 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod898b9f52_035a_4f23_8362_10bbd49da54e.slice/crio-da9ebc6be208b307375d6f007be99660398491797a08a9bf14e7867348dba3ea WatchSource:0}: Error finding container da9ebc6be208b307375d6f007be99660398491797a08a9bf14e7867348dba3ea: Status 404 returned error can't find the container with id da9ebc6be208b307375d6f007be99660398491797a08a9bf14e7867348dba3ea Jan 05 23:10:27 crc kubenswrapper[4910]: I0105 23:10:27.825141 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"898b9f52-035a-4f23-8362-10bbd49da54e","Type":"ContainerStarted","Data":"b9c104bcdc99b1aa87cce0eb8934974587ddc2aea125223a22d0fcb7e49ca084"} Jan 05 23:10:27 crc kubenswrapper[4910]: I0105 23:10:27.826174 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"898b9f52-035a-4f23-8362-10bbd49da54e","Type":"ContainerStarted","Data":"da9ebc6be208b307375d6f007be99660398491797a08a9bf14e7867348dba3ea"} Jan 05 23:10:29 crc kubenswrapper[4910]: I0105 23:10:29.843721 4910 generic.go:334] "Generic (PLEG): container finished" podID="d18ec054-49b0-49da-bf27-16a8ac236b5d" containerID="31e976a908b4393e92bebb1f920527033f80549dda30da007a8f7de656c8ab00" exitCode=0 Jan 05 23:10:29 crc kubenswrapper[4910]: I0105 23:10:29.843854 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"d18ec054-49b0-49da-bf27-16a8ac236b5d","Type":"ContainerDied","Data":"31e976a908b4393e92bebb1f920527033f80549dda30da007a8f7de656c8ab00"} Jan 05 23:10:30 crc kubenswrapper[4910]: I0105 23:10:30.855024 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"d18ec054-49b0-49da-bf27-16a8ac236b5d","Type":"ContainerStarted","Data":"39e3ce02c6257fe0da7516d74cd259b75bf510f53e00f629efe5affdde8d355c"} Jan 05 23:10:30 crc kubenswrapper[4910]: I0105 23:10:30.884647 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=8.884623195 podStartE2EDuration="8.884623195s" podCreationTimestamp="2026-01-05 23:10:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:10:30.876985246 +0000 UTC m=+4762.454482926" watchObservedRunningTime="2026-01-05 23:10:30.884623195 +0000 UTC m=+4762.462120875" Jan 05 23:10:31 crc kubenswrapper[4910]: I0105 23:10:31.798443 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:31 crc kubenswrapper[4910]: I0105 23:10:31.867208 4910 generic.go:334] "Generic (PLEG): container finished" podID="898b9f52-035a-4f23-8362-10bbd49da54e" containerID="b9c104bcdc99b1aa87cce0eb8934974587ddc2aea125223a22d0fcb7e49ca084" exitCode=0 Jan 05 23:10:31 crc kubenswrapper[4910]: I0105 23:10:31.867269 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"898b9f52-035a-4f23-8362-10bbd49da54e","Type":"ContainerDied","Data":"b9c104bcdc99b1aa87cce0eb8934974587ddc2aea125223a22d0fcb7e49ca084"} Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.193241 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.309709 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-dfhv7"] Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.309997 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" podUID="d7f771bf-44ed-4048-a0a2-891d2fb72d74" containerName="dnsmasq-dns" containerID="cri-o://facb3530055f27725b83c80a371e9a2f4d949ff30ff7223c3d0bd130d680c3d3" gracePeriod=10 Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.722368 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:10:32 crc kubenswrapper[4910]: E0105 23:10:32.723270 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.784916 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.874594 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7f771bf-44ed-4048-a0a2-891d2fb72d74-dns-svc\") pod \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\" (UID: \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\") " Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.874698 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8qtpw\" (UniqueName: \"kubernetes.io/projected/d7f771bf-44ed-4048-a0a2-891d2fb72d74-kube-api-access-8qtpw\") pod \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\" (UID: \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\") " Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.874729 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7f771bf-44ed-4048-a0a2-891d2fb72d74-config\") pod \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\" (UID: \"d7f771bf-44ed-4048-a0a2-891d2fb72d74\") " Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.880894 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7f771bf-44ed-4048-a0a2-891d2fb72d74-kube-api-access-8qtpw" (OuterVolumeSpecName: "kube-api-access-8qtpw") pod "d7f771bf-44ed-4048-a0a2-891d2fb72d74" (UID: "d7f771bf-44ed-4048-a0a2-891d2fb72d74"). InnerVolumeSpecName "kube-api-access-8qtpw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.881142 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"898b9f52-035a-4f23-8362-10bbd49da54e","Type":"ContainerStarted","Data":"088d410a9ad21b841b592ce0b979888fc6fe00e6396f83da4cf796fa7873a0ea"} Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.885147 4910 generic.go:334] "Generic (PLEG): container finished" podID="d7f771bf-44ed-4048-a0a2-891d2fb72d74" containerID="facb3530055f27725b83c80a371e9a2f4d949ff30ff7223c3d0bd130d680c3d3" exitCode=0 Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.885197 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" event={"ID":"d7f771bf-44ed-4048-a0a2-891d2fb72d74","Type":"ContainerDied","Data":"facb3530055f27725b83c80a371e9a2f4d949ff30ff7223c3d0bd130d680c3d3"} Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.885231 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" event={"ID":"d7f771bf-44ed-4048-a0a2-891d2fb72d74","Type":"ContainerDied","Data":"3e83e96f6bf36f391123c590d1248d8bd9f30f3fd0cdeab7df7ad010639a79dd"} Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.885256 4910 scope.go:117] "RemoveContainer" containerID="facb3530055f27725b83c80a371e9a2f4d949ff30ff7223c3d0bd130d680c3d3" Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.885449 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95587bc99-dfhv7" Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.902470 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=8.902445225 podStartE2EDuration="8.902445225s" podCreationTimestamp="2026-01-05 23:10:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:10:32.901379149 +0000 UTC m=+4764.478876839" watchObservedRunningTime="2026-01-05 23:10:32.902445225 +0000 UTC m=+4764.479942905" Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.912246 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7f771bf-44ed-4048-a0a2-891d2fb72d74-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d7f771bf-44ed-4048-a0a2-891d2fb72d74" (UID: "d7f771bf-44ed-4048-a0a2-891d2fb72d74"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.923958 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7f771bf-44ed-4048-a0a2-891d2fb72d74-config" (OuterVolumeSpecName: "config") pod "d7f771bf-44ed-4048-a0a2-891d2fb72d74" (UID: "d7f771bf-44ed-4048-a0a2-891d2fb72d74"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.976576 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7f771bf-44ed-4048-a0a2-891d2fb72d74-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.976611 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8qtpw\" (UniqueName: \"kubernetes.io/projected/d7f771bf-44ed-4048-a0a2-891d2fb72d74-kube-api-access-8qtpw\") on node \"crc\" DevicePath \"\"" Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.976625 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7f771bf-44ed-4048-a0a2-891d2fb72d74-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:10:32 crc kubenswrapper[4910]: I0105 23:10:32.995580 4910 scope.go:117] "RemoveContainer" containerID="6c803fdc5db2bc04b32a0565d7e6f7fcc1c7978e1103c00fa3d868e194499047" Jan 05 23:10:33 crc kubenswrapper[4910]: I0105 23:10:33.017097 4910 scope.go:117] "RemoveContainer" containerID="facb3530055f27725b83c80a371e9a2f4d949ff30ff7223c3d0bd130d680c3d3" Jan 05 23:10:33 crc kubenswrapper[4910]: E0105 23:10:33.017709 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"facb3530055f27725b83c80a371e9a2f4d949ff30ff7223c3d0bd130d680c3d3\": container with ID starting with facb3530055f27725b83c80a371e9a2f4d949ff30ff7223c3d0bd130d680c3d3 not found: ID does not exist" containerID="facb3530055f27725b83c80a371e9a2f4d949ff30ff7223c3d0bd130d680c3d3" Jan 05 23:10:33 crc kubenswrapper[4910]: I0105 23:10:33.017749 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"facb3530055f27725b83c80a371e9a2f4d949ff30ff7223c3d0bd130d680c3d3"} err="failed to get container status \"facb3530055f27725b83c80a371e9a2f4d949ff30ff7223c3d0bd130d680c3d3\": rpc error: code = NotFound desc = could not find container \"facb3530055f27725b83c80a371e9a2f4d949ff30ff7223c3d0bd130d680c3d3\": container with ID starting with facb3530055f27725b83c80a371e9a2f4d949ff30ff7223c3d0bd130d680c3d3 not found: ID does not exist" Jan 05 23:10:33 crc kubenswrapper[4910]: I0105 23:10:33.017774 4910 scope.go:117] "RemoveContainer" containerID="6c803fdc5db2bc04b32a0565d7e6f7fcc1c7978e1103c00fa3d868e194499047" Jan 05 23:10:33 crc kubenswrapper[4910]: E0105 23:10:33.018883 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c803fdc5db2bc04b32a0565d7e6f7fcc1c7978e1103c00fa3d868e194499047\": container with ID starting with 6c803fdc5db2bc04b32a0565d7e6f7fcc1c7978e1103c00fa3d868e194499047 not found: ID does not exist" containerID="6c803fdc5db2bc04b32a0565d7e6f7fcc1c7978e1103c00fa3d868e194499047" Jan 05 23:10:33 crc kubenswrapper[4910]: I0105 23:10:33.019040 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c803fdc5db2bc04b32a0565d7e6f7fcc1c7978e1103c00fa3d868e194499047"} err="failed to get container status \"6c803fdc5db2bc04b32a0565d7e6f7fcc1c7978e1103c00fa3d868e194499047\": rpc error: code = NotFound desc = could not find container \"6c803fdc5db2bc04b32a0565d7e6f7fcc1c7978e1103c00fa3d868e194499047\": container with ID starting with 6c803fdc5db2bc04b32a0565d7e6f7fcc1c7978e1103c00fa3d868e194499047 not found: ID does not exist" Jan 05 23:10:33 crc kubenswrapper[4910]: I0105 
23:10:33.230416 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-dfhv7"] Jan 05 23:10:33 crc kubenswrapper[4910]: I0105 23:10:33.237842 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-dfhv7"] Jan 05 23:10:34 crc kubenswrapper[4910]: I0105 23:10:34.578912 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 05 23:10:34 crc kubenswrapper[4910]: I0105 23:10:34.580759 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 05 23:10:34 crc kubenswrapper[4910]: I0105 23:10:34.692251 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 05 23:10:34 crc kubenswrapper[4910]: I0105 23:10:34.741006 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7f771bf-44ed-4048-a0a2-891d2fb72d74" path="/var/lib/kubelet/pods/d7f771bf-44ed-4048-a0a2-891d2fb72d74/volumes" Jan 05 23:10:34 crc kubenswrapper[4910]: I0105 23:10:34.741723 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 05 23:10:35 crc kubenswrapper[4910]: I0105 23:10:35.007435 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 05 23:10:36 crc kubenswrapper[4910]: I0105 23:10:36.176292 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:36 crc kubenswrapper[4910]: I0105 23:10:36.176478 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:38 crc kubenswrapper[4910]: I0105 23:10:38.503150 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:38 crc kubenswrapper[4910]: I0105 23:10:38.597926 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 05 23:10:42 crc kubenswrapper[4910]: I0105 23:10:42.945809 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-5j9ms"] Jan 05 23:10:42 crc kubenswrapper[4910]: E0105 23:10:42.948645 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7f771bf-44ed-4048-a0a2-891d2fb72d74" containerName="dnsmasq-dns" Jan 05 23:10:42 crc kubenswrapper[4910]: I0105 23:10:42.948781 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7f771bf-44ed-4048-a0a2-891d2fb72d74" containerName="dnsmasq-dns" Jan 05 23:10:42 crc kubenswrapper[4910]: E0105 23:10:42.948935 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7f771bf-44ed-4048-a0a2-891d2fb72d74" containerName="init" Jan 05 23:10:42 crc kubenswrapper[4910]: I0105 23:10:42.949043 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7f771bf-44ed-4048-a0a2-891d2fb72d74" containerName="init" Jan 05 23:10:42 crc kubenswrapper[4910]: I0105 23:10:42.949418 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7f771bf-44ed-4048-a0a2-891d2fb72d74" containerName="dnsmasq-dns" Jan 05 23:10:42 crc kubenswrapper[4910]: I0105 23:10:42.950537 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-5j9ms" Jan 05 23:10:42 crc kubenswrapper[4910]: I0105 23:10:42.953641 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 05 23:10:42 crc kubenswrapper[4910]: I0105 23:10:42.972009 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-5j9ms"] Jan 05 23:10:43 crc kubenswrapper[4910]: I0105 23:10:43.077408 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwzmx\" (UniqueName: \"kubernetes.io/projected/3ae9ddee-8aee-4078-8954-65d64f8841e0-kube-api-access-wwzmx\") pod \"root-account-create-update-5j9ms\" (UID: \"3ae9ddee-8aee-4078-8954-65d64f8841e0\") " pod="openstack/root-account-create-update-5j9ms" Jan 05 23:10:43 crc kubenswrapper[4910]: I0105 23:10:43.077495 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ae9ddee-8aee-4078-8954-65d64f8841e0-operator-scripts\") pod \"root-account-create-update-5j9ms\" (UID: \"3ae9ddee-8aee-4078-8954-65d64f8841e0\") " pod="openstack/root-account-create-update-5j9ms" Jan 05 23:10:43 crc kubenswrapper[4910]: I0105 23:10:43.182193 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwzmx\" (UniqueName: \"kubernetes.io/projected/3ae9ddee-8aee-4078-8954-65d64f8841e0-kube-api-access-wwzmx\") pod \"root-account-create-update-5j9ms\" (UID: \"3ae9ddee-8aee-4078-8954-65d64f8841e0\") " pod="openstack/root-account-create-update-5j9ms" Jan 05 23:10:43 crc kubenswrapper[4910]: I0105 23:10:43.183413 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ae9ddee-8aee-4078-8954-65d64f8841e0-operator-scripts\") pod \"root-account-create-update-5j9ms\" (UID: \"3ae9ddee-8aee-4078-8954-65d64f8841e0\") " pod="openstack/root-account-create-update-5j9ms" Jan 05 23:10:43 crc kubenswrapper[4910]: I0105 23:10:43.184902 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ae9ddee-8aee-4078-8954-65d64f8841e0-operator-scripts\") pod \"root-account-create-update-5j9ms\" (UID: \"3ae9ddee-8aee-4078-8954-65d64f8841e0\") " pod="openstack/root-account-create-update-5j9ms" Jan 05 23:10:43 crc kubenswrapper[4910]: I0105 23:10:43.217461 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwzmx\" (UniqueName: \"kubernetes.io/projected/3ae9ddee-8aee-4078-8954-65d64f8841e0-kube-api-access-wwzmx\") pod \"root-account-create-update-5j9ms\" (UID: \"3ae9ddee-8aee-4078-8954-65d64f8841e0\") " pod="openstack/root-account-create-update-5j9ms" Jan 05 23:10:43 crc kubenswrapper[4910]: I0105 23:10:43.274784 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-5j9ms" Jan 05 23:10:43 crc kubenswrapper[4910]: I0105 23:10:43.816284 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-5j9ms"] Jan 05 23:10:44 crc kubenswrapper[4910]: I0105 23:10:44.002723 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-5j9ms" event={"ID":"3ae9ddee-8aee-4078-8954-65d64f8841e0","Type":"ContainerStarted","Data":"c7bfbf58245b9f50dd013bac59dbf3f7fc002eac280024e97e2ecbc24b48f97c"} Jan 05 23:10:45 crc kubenswrapper[4910]: I0105 23:10:45.017266 4910 generic.go:334] "Generic (PLEG): container finished" podID="3ae9ddee-8aee-4078-8954-65d64f8841e0" containerID="c54151f6229253593ca70479ea8e470a4d6fd1cbbfd73c9e7950940175c8a418" exitCode=0 Jan 05 23:10:45 crc kubenswrapper[4910]: I0105 23:10:45.017378 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-5j9ms" event={"ID":"3ae9ddee-8aee-4078-8954-65d64f8841e0","Type":"ContainerDied","Data":"c54151f6229253593ca70479ea8e470a4d6fd1cbbfd73c9e7950940175c8a418"} Jan 05 23:10:45 crc kubenswrapper[4910]: I0105 23:10:45.722427 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:10:45 crc kubenswrapper[4910]: E0105 23:10:45.723084 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:10:46 crc kubenswrapper[4910]: I0105 23:10:46.524465 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-5j9ms" Jan 05 23:10:46 crc kubenswrapper[4910]: I0105 23:10:46.648656 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ae9ddee-8aee-4078-8954-65d64f8841e0-operator-scripts\") pod \"3ae9ddee-8aee-4078-8954-65d64f8841e0\" (UID: \"3ae9ddee-8aee-4078-8954-65d64f8841e0\") " Jan 05 23:10:46 crc kubenswrapper[4910]: I0105 23:10:46.648782 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwzmx\" (UniqueName: \"kubernetes.io/projected/3ae9ddee-8aee-4078-8954-65d64f8841e0-kube-api-access-wwzmx\") pod \"3ae9ddee-8aee-4078-8954-65d64f8841e0\" (UID: \"3ae9ddee-8aee-4078-8954-65d64f8841e0\") " Jan 05 23:10:46 crc kubenswrapper[4910]: I0105 23:10:46.649710 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ae9ddee-8aee-4078-8954-65d64f8841e0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3ae9ddee-8aee-4078-8954-65d64f8841e0" (UID: "3ae9ddee-8aee-4078-8954-65d64f8841e0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:10:46 crc kubenswrapper[4910]: I0105 23:10:46.660018 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ae9ddee-8aee-4078-8954-65d64f8841e0-kube-api-access-wwzmx" (OuterVolumeSpecName: "kube-api-access-wwzmx") pod "3ae9ddee-8aee-4078-8954-65d64f8841e0" (UID: "3ae9ddee-8aee-4078-8954-65d64f8841e0"). 
InnerVolumeSpecName "kube-api-access-wwzmx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:10:46 crc kubenswrapper[4910]: I0105 23:10:46.750902 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ae9ddee-8aee-4078-8954-65d64f8841e0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:10:46 crc kubenswrapper[4910]: I0105 23:10:46.750976 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwzmx\" (UniqueName: \"kubernetes.io/projected/3ae9ddee-8aee-4078-8954-65d64f8841e0-kube-api-access-wwzmx\") on node \"crc\" DevicePath \"\"" Jan 05 23:10:47 crc kubenswrapper[4910]: I0105 23:10:47.040496 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-5j9ms" event={"ID":"3ae9ddee-8aee-4078-8954-65d64f8841e0","Type":"ContainerDied","Data":"c7bfbf58245b9f50dd013bac59dbf3f7fc002eac280024e97e2ecbc24b48f97c"} Jan 05 23:10:47 crc kubenswrapper[4910]: I0105 23:10:47.040564 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c7bfbf58245b9f50dd013bac59dbf3f7fc002eac280024e97e2ecbc24b48f97c" Jan 05 23:10:47 crc kubenswrapper[4910]: I0105 23:10:47.040655 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-5j9ms" Jan 05 23:10:49 crc kubenswrapper[4910]: I0105 23:10:49.554395 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-5j9ms"] Jan 05 23:10:49 crc kubenswrapper[4910]: I0105 23:10:49.563652 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-5j9ms"] Jan 05 23:10:50 crc kubenswrapper[4910]: I0105 23:10:50.736558 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ae9ddee-8aee-4078-8954-65d64f8841e0" path="/var/lib/kubelet/pods/3ae9ddee-8aee-4078-8954-65d64f8841e0/volumes" Jan 05 23:10:54 crc kubenswrapper[4910]: I0105 23:10:54.575671 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-lrl2t"] Jan 05 23:10:54 crc kubenswrapper[4910]: E0105 23:10:54.576432 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ae9ddee-8aee-4078-8954-65d64f8841e0" containerName="mariadb-account-create-update" Jan 05 23:10:54 crc kubenswrapper[4910]: I0105 23:10:54.576449 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ae9ddee-8aee-4078-8954-65d64f8841e0" containerName="mariadb-account-create-update" Jan 05 23:10:54 crc kubenswrapper[4910]: I0105 23:10:54.576788 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ae9ddee-8aee-4078-8954-65d64f8841e0" containerName="mariadb-account-create-update" Jan 05 23:10:54 crc kubenswrapper[4910]: I0105 23:10:54.577472 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-lrl2t" Jan 05 23:10:54 crc kubenswrapper[4910]: I0105 23:10:54.580524 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 05 23:10:54 crc kubenswrapper[4910]: I0105 23:10:54.585399 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-lrl2t"] Jan 05 23:10:54 crc kubenswrapper[4910]: I0105 23:10:54.718882 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8jgx\" (UniqueName: \"kubernetes.io/projected/1aa172db-0909-4f70-9a54-b85121c67926-kube-api-access-j8jgx\") pod \"root-account-create-update-lrl2t\" (UID: \"1aa172db-0909-4f70-9a54-b85121c67926\") " pod="openstack/root-account-create-update-lrl2t" Jan 05 23:10:54 crc kubenswrapper[4910]: I0105 23:10:54.720024 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1aa172db-0909-4f70-9a54-b85121c67926-operator-scripts\") pod \"root-account-create-update-lrl2t\" (UID: \"1aa172db-0909-4f70-9a54-b85121c67926\") " pod="openstack/root-account-create-update-lrl2t" Jan 05 23:10:54 crc kubenswrapper[4910]: I0105 23:10:54.821646 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8jgx\" (UniqueName: \"kubernetes.io/projected/1aa172db-0909-4f70-9a54-b85121c67926-kube-api-access-j8jgx\") pod \"root-account-create-update-lrl2t\" (UID: \"1aa172db-0909-4f70-9a54-b85121c67926\") " pod="openstack/root-account-create-update-lrl2t" Jan 05 23:10:54 crc kubenswrapper[4910]: I0105 23:10:54.821879 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1aa172db-0909-4f70-9a54-b85121c67926-operator-scripts\") pod \"root-account-create-update-lrl2t\" (UID: \"1aa172db-0909-4f70-9a54-b85121c67926\") " pod="openstack/root-account-create-update-lrl2t" Jan 05 23:10:54 crc kubenswrapper[4910]: I0105 23:10:54.823442 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1aa172db-0909-4f70-9a54-b85121c67926-operator-scripts\") pod \"root-account-create-update-lrl2t\" (UID: \"1aa172db-0909-4f70-9a54-b85121c67926\") " pod="openstack/root-account-create-update-lrl2t" Jan 05 23:10:54 crc kubenswrapper[4910]: I0105 23:10:54.860005 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8jgx\" (UniqueName: \"kubernetes.io/projected/1aa172db-0909-4f70-9a54-b85121c67926-kube-api-access-j8jgx\") pod \"root-account-create-update-lrl2t\" (UID: \"1aa172db-0909-4f70-9a54-b85121c67926\") " pod="openstack/root-account-create-update-lrl2t" Jan 05 23:10:54 crc kubenswrapper[4910]: I0105 23:10:54.913449 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-lrl2t" Jan 05 23:10:55 crc kubenswrapper[4910]: I0105 23:10:55.264912 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-lrl2t"] Jan 05 23:10:56 crc kubenswrapper[4910]: I0105 23:10:56.158580 4910 generic.go:334] "Generic (PLEG): container finished" podID="1aa172db-0909-4f70-9a54-b85121c67926" containerID="1a5728d0c415528680c8cbfb04df75de89d4c288ef34d4ea5499256e5e827057" exitCode=0 Jan 05 23:10:56 crc kubenswrapper[4910]: I0105 23:10:56.158706 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-lrl2t" event={"ID":"1aa172db-0909-4f70-9a54-b85121c67926","Type":"ContainerDied","Data":"1a5728d0c415528680c8cbfb04df75de89d4c288ef34d4ea5499256e5e827057"} Jan 05 23:10:56 crc kubenswrapper[4910]: I0105 23:10:56.159153 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-lrl2t" event={"ID":"1aa172db-0909-4f70-9a54-b85121c67926","Type":"ContainerStarted","Data":"b9d2f5405cf3d3326f2d3f598b4ac5874278c17f9d96eae923c7ba72acb65eb5"} Jan 05 23:10:57 crc kubenswrapper[4910]: I0105 23:10:57.618716 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-lrl2t" Jan 05 23:10:57 crc kubenswrapper[4910]: I0105 23:10:57.781975 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8jgx\" (UniqueName: \"kubernetes.io/projected/1aa172db-0909-4f70-9a54-b85121c67926-kube-api-access-j8jgx\") pod \"1aa172db-0909-4f70-9a54-b85121c67926\" (UID: \"1aa172db-0909-4f70-9a54-b85121c67926\") " Jan 05 23:10:57 crc kubenswrapper[4910]: I0105 23:10:57.782202 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1aa172db-0909-4f70-9a54-b85121c67926-operator-scripts\") pod \"1aa172db-0909-4f70-9a54-b85121c67926\" (UID: \"1aa172db-0909-4f70-9a54-b85121c67926\") " Jan 05 23:10:57 crc kubenswrapper[4910]: I0105 23:10:57.783580 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1aa172db-0909-4f70-9a54-b85121c67926-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1aa172db-0909-4f70-9a54-b85121c67926" (UID: "1aa172db-0909-4f70-9a54-b85121c67926"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:10:57 crc kubenswrapper[4910]: I0105 23:10:57.792460 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1aa172db-0909-4f70-9a54-b85121c67926-kube-api-access-j8jgx" (OuterVolumeSpecName: "kube-api-access-j8jgx") pod "1aa172db-0909-4f70-9a54-b85121c67926" (UID: "1aa172db-0909-4f70-9a54-b85121c67926"). InnerVolumeSpecName "kube-api-access-j8jgx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:10:57 crc kubenswrapper[4910]: I0105 23:10:57.883904 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8jgx\" (UniqueName: \"kubernetes.io/projected/1aa172db-0909-4f70-9a54-b85121c67926-kube-api-access-j8jgx\") on node \"crc\" DevicePath \"\"" Jan 05 23:10:57 crc kubenswrapper[4910]: I0105 23:10:57.883937 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1aa172db-0909-4f70-9a54-b85121c67926-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:10:58 crc kubenswrapper[4910]: I0105 23:10:58.189585 4910 generic.go:334] "Generic (PLEG): container finished" podID="fd8c27e8-d4f8-4b4a-b588-36235407cb65" containerID="35d5798419d61ba08fe86e8ff05f292512a662c7db7b41e6936875df72a721b6" exitCode=0 Jan 05 23:10:58 crc kubenswrapper[4910]: I0105 23:10:58.189708 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fd8c27e8-d4f8-4b4a-b588-36235407cb65","Type":"ContainerDied","Data":"35d5798419d61ba08fe86e8ff05f292512a662c7db7b41e6936875df72a721b6"} Jan 05 23:10:58 crc kubenswrapper[4910]: I0105 23:10:58.192788 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-lrl2t" event={"ID":"1aa172db-0909-4f70-9a54-b85121c67926","Type":"ContainerDied","Data":"b9d2f5405cf3d3326f2d3f598b4ac5874278c17f9d96eae923c7ba72acb65eb5"} Jan 05 23:10:58 crc kubenswrapper[4910]: I0105 23:10:58.192844 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-lrl2t" Jan 05 23:10:58 crc kubenswrapper[4910]: I0105 23:10:58.192851 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9d2f5405cf3d3326f2d3f598b4ac5874278c17f9d96eae923c7ba72acb65eb5" Jan 05 23:10:58 crc kubenswrapper[4910]: E0105 23:10:58.534584 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb4a03100_4353_4d31_815b_2ad6b4286473.slice/crio-conmon-801ecf70653461d6fb46b27058a2887a58241bacb58255ab7804988977d080cf.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb4a03100_4353_4d31_815b_2ad6b4286473.slice/crio-801ecf70653461d6fb46b27058a2887a58241bacb58255ab7804988977d080cf.scope\": RecentStats: unable to find data in memory cache]" Jan 05 23:10:58 crc kubenswrapper[4910]: I0105 23:10:58.734078 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:10:58 crc kubenswrapper[4910]: E0105 23:10:58.735023 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:10:59 crc kubenswrapper[4910]: I0105 23:10:59.205841 4910 generic.go:334] "Generic (PLEG): container finished" podID="b4a03100-4353-4d31-815b-2ad6b4286473" containerID="801ecf70653461d6fb46b27058a2887a58241bacb58255ab7804988977d080cf" exitCode=0 Jan 05 23:10:59 crc kubenswrapper[4910]: I0105 23:10:59.205898 4910 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b4a03100-4353-4d31-815b-2ad6b4286473","Type":"ContainerDied","Data":"801ecf70653461d6fb46b27058a2887a58241bacb58255ab7804988977d080cf"} Jan 05 23:10:59 crc kubenswrapper[4910]: I0105 23:10:59.211567 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fd8c27e8-d4f8-4b4a-b588-36235407cb65","Type":"ContainerStarted","Data":"317a2470eac1f5c10b0c0202db8da7ea29fc33a1ef38b36a67ac3856b2ecee85"} Jan 05 23:10:59 crc kubenswrapper[4910]: I0105 23:10:59.212238 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 05 23:10:59 crc kubenswrapper[4910]: I0105 23:10:59.322521 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.322495852 podStartE2EDuration="38.322495852s" podCreationTimestamp="2026-01-05 23:10:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:10:59.311844069 +0000 UTC m=+4790.889341769" watchObservedRunningTime="2026-01-05 23:10:59.322495852 +0000 UTC m=+4790.899993522" Jan 05 23:11:00 crc kubenswrapper[4910]: I0105 23:11:00.223531 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b4a03100-4353-4d31-815b-2ad6b4286473","Type":"ContainerStarted","Data":"5b20fb89f32b09e60147577be6c1686f361bde2ffc0f8a333b53007e87f75dde"} Jan 05 23:11:00 crc kubenswrapper[4910]: I0105 23:11:00.224807 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:00 crc kubenswrapper[4910]: I0105 23:11:00.262556 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=39.262536286 podStartE2EDuration="39.262536286s" podCreationTimestamp="2026-01-05 23:10:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:11:00.247982047 +0000 UTC m=+4791.825479717" watchObservedRunningTime="2026-01-05 23:11:00.262536286 +0000 UTC m=+4791.840033956" Jan 05 23:11:09 crc kubenswrapper[4910]: I0105 23:11:09.722248 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:11:09 crc kubenswrapper[4910]: E0105 23:11:09.723566 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:11:13 crc kubenswrapper[4910]: I0105 23:11:13.023760 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 05 23:11:13 crc kubenswrapper[4910]: I0105 23:11:13.387852 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:13 crc kubenswrapper[4910]: E0105 23:11:13.894771 4910 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.166:58240->38.102.83.166:40365: write tcp 
38.102.83.166:58240->38.102.83.166:40365: write: broken pipe Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.258563 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6gxx7"] Jan 05 23:11:16 crc kubenswrapper[4910]: E0105 23:11:16.259345 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aa172db-0909-4f70-9a54-b85121c67926" containerName="mariadb-account-create-update" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.259368 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aa172db-0909-4f70-9a54-b85121c67926" containerName="mariadb-account-create-update" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.259581 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1aa172db-0909-4f70-9a54-b85121c67926" containerName="mariadb-account-create-update" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.261045 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.287825 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6gxx7"] Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.355637 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-utilities\") pod \"redhat-marketplace-6gxx7\" (UID: \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\") " pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.355841 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-catalog-content\") pod \"redhat-marketplace-6gxx7\" (UID: \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\") " pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.355886 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmwr5\" (UniqueName: \"kubernetes.io/projected/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-kube-api-access-pmwr5\") pod \"redhat-marketplace-6gxx7\" (UID: \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\") " pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.457238 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-catalog-content\") pod \"redhat-marketplace-6gxx7\" (UID: \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\") " pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.457309 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmwr5\" (UniqueName: \"kubernetes.io/projected/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-kube-api-access-pmwr5\") pod \"redhat-marketplace-6gxx7\" (UID: \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\") " pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.457425 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-utilities\") pod 
\"redhat-marketplace-6gxx7\" (UID: \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\") " pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.457861 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-catalog-content\") pod \"redhat-marketplace-6gxx7\" (UID: \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\") " pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.457882 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-utilities\") pod \"redhat-marketplace-6gxx7\" (UID: \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\") " pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.481288 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmwr5\" (UniqueName: \"kubernetes.io/projected/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-kube-api-access-pmwr5\") pod \"redhat-marketplace-6gxx7\" (UID: \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\") " pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.592685 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:16 crc kubenswrapper[4910]: I0105 23:11:16.898653 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6gxx7"] Jan 05 23:11:16 crc kubenswrapper[4910]: W0105 23:11:16.905524 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d883f70_994d_4c7d_9cb3_04f3a29d2ff8.slice/crio-c890ea1070d8bedc7ff5c20be3ad04b9b373b7c80b5f7c1d2791033cc545e540 WatchSource:0}: Error finding container c890ea1070d8bedc7ff5c20be3ad04b9b373b7c80b5f7c1d2791033cc545e540: Status 404 returned error can't find the container with id c890ea1070d8bedc7ff5c20be3ad04b9b373b7c80b5f7c1d2791033cc545e540 Jan 05 23:11:17 crc kubenswrapper[4910]: I0105 23:11:17.403770 4910 generic.go:334] "Generic (PLEG): container finished" podID="1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" containerID="05a53dd0096f6d23cdf17464689d9405476246726de7cb28c7371301aaae06cb" exitCode=0 Jan 05 23:11:17 crc kubenswrapper[4910]: I0105 23:11:17.403842 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6gxx7" event={"ID":"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8","Type":"ContainerDied","Data":"05a53dd0096f6d23cdf17464689d9405476246726de7cb28c7371301aaae06cb"} Jan 05 23:11:17 crc kubenswrapper[4910]: I0105 23:11:17.403891 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6gxx7" event={"ID":"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8","Type":"ContainerStarted","Data":"c890ea1070d8bedc7ff5c20be3ad04b9b373b7c80b5f7c1d2791033cc545e540"} Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.413367 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6gxx7" event={"ID":"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8","Type":"ContainerStarted","Data":"b3efa5551bc2c18498e983f2d8bbe94cdb1936cf28af0ba435ef8f8122b44100"} Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.498602 4910 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/dnsmasq-dns-699964fbc-9w2bk"] Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.499886 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.518915 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-9w2bk"] Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.595713 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-config\") pod \"dnsmasq-dns-699964fbc-9w2bk\" (UID: \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\") " pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.596239 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-dns-svc\") pod \"dnsmasq-dns-699964fbc-9w2bk\" (UID: \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\") " pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.596295 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmkqf\" (UniqueName: \"kubernetes.io/projected/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-kube-api-access-cmkqf\") pod \"dnsmasq-dns-699964fbc-9w2bk\" (UID: \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\") " pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.698514 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-dns-svc\") pod \"dnsmasq-dns-699964fbc-9w2bk\" (UID: \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\") " pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.698638 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmkqf\" (UniqueName: \"kubernetes.io/projected/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-kube-api-access-cmkqf\") pod \"dnsmasq-dns-699964fbc-9w2bk\" (UID: \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\") " pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.698703 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-config\") pod \"dnsmasq-dns-699964fbc-9w2bk\" (UID: \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\") " pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.699924 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-config\") pod \"dnsmasq-dns-699964fbc-9w2bk\" (UID: \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\") " pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.700282 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-dns-svc\") pod \"dnsmasq-dns-699964fbc-9w2bk\" (UID: \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\") " pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.721918 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmkqf\" (UniqueName: \"kubernetes.io/projected/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-kube-api-access-cmkqf\") pod \"dnsmasq-dns-699964fbc-9w2bk\" (UID: \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\") " pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:11:18 crc kubenswrapper[4910]: I0105 23:11:18.862791 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:11:19 crc kubenswrapper[4910]: I0105 23:11:19.136682 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 05 23:11:19 crc kubenswrapper[4910]: I0105 23:11:19.425467 4910 generic.go:334] "Generic (PLEG): container finished" podID="1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" containerID="b3efa5551bc2c18498e983f2d8bbe94cdb1936cf28af0ba435ef8f8122b44100" exitCode=0 Jan 05 23:11:19 crc kubenswrapper[4910]: I0105 23:11:19.425527 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6gxx7" event={"ID":"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8","Type":"ContainerDied","Data":"b3efa5551bc2c18498e983f2d8bbe94cdb1936cf28af0ba435ef8f8122b44100"} Jan 05 23:11:19 crc kubenswrapper[4910]: W0105 23:11:19.434898 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c0b90d1_eaaa_4555_898a_5c8abbb6d9a9.slice/crio-ca7300e229683eb8c1eca90f8dae76e94a5629e4f770872c0f5eb9ccb9adf26f WatchSource:0}: Error finding container ca7300e229683eb8c1eca90f8dae76e94a5629e4f770872c0f5eb9ccb9adf26f: Status 404 returned error can't find the container with id ca7300e229683eb8c1eca90f8dae76e94a5629e4f770872c0f5eb9ccb9adf26f Jan 05 23:11:19 crc kubenswrapper[4910]: I0105 23:11:19.444661 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-9w2bk"] Jan 05 23:11:20 crc kubenswrapper[4910]: I0105 23:11:20.110575 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 05 23:11:20 crc kubenswrapper[4910]: I0105 23:11:20.437809 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6gxx7" event={"ID":"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8","Type":"ContainerStarted","Data":"28bc7eff89bc6a90b8d7212536e4bb5237ab6d8177a54c919e639fe0502120c0"} Jan 05 23:11:20 crc kubenswrapper[4910]: I0105 23:11:20.439822 4910 generic.go:334] "Generic (PLEG): container finished" podID="8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9" containerID="93e4c55f9177b116015994dd22fa2a6c729c45fe087b2ff6b7d8b4c984512294" exitCode=0 Jan 05 23:11:20 crc kubenswrapper[4910]: I0105 23:11:20.439875 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-9w2bk" event={"ID":"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9","Type":"ContainerDied","Data":"93e4c55f9177b116015994dd22fa2a6c729c45fe087b2ff6b7d8b4c984512294"} Jan 05 23:11:20 crc kubenswrapper[4910]: I0105 23:11:20.439913 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-9w2bk" event={"ID":"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9","Type":"ContainerStarted","Data":"ca7300e229683eb8c1eca90f8dae76e94a5629e4f770872c0f5eb9ccb9adf26f"} Jan 05 23:11:20 crc kubenswrapper[4910]: I0105 23:11:20.527169 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6gxx7" podStartSLOduration=2.022581089 
podStartE2EDuration="4.527148111s" podCreationTimestamp="2026-01-05 23:11:16 +0000 UTC" firstStartedPulling="2026-01-05 23:11:17.406930087 +0000 UTC m=+4808.984427797" lastFinishedPulling="2026-01-05 23:11:19.911497149 +0000 UTC m=+4811.488994819" observedRunningTime="2026-01-05 23:11:20.471431735 +0000 UTC m=+4812.048929405" watchObservedRunningTime="2026-01-05 23:11:20.527148111 +0000 UTC m=+4812.104645781" Jan 05 23:11:21 crc kubenswrapper[4910]: I0105 23:11:21.180325 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="fd8c27e8-d4f8-4b4a-b588-36235407cb65" containerName="rabbitmq" containerID="cri-o://317a2470eac1f5c10b0c0202db8da7ea29fc33a1ef38b36a67ac3856b2ecee85" gracePeriod=604798 Jan 05 23:11:21 crc kubenswrapper[4910]: I0105 23:11:21.456193 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-9w2bk" event={"ID":"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9","Type":"ContainerStarted","Data":"1eb072a223ea45ddd20773726c08a60e0e17f224849fe23f180e06bb599671c8"} Jan 05 23:11:21 crc kubenswrapper[4910]: I0105 23:11:21.456310 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:11:21 crc kubenswrapper[4910]: I0105 23:11:21.483924 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-699964fbc-9w2bk" podStartSLOduration=3.483903409 podStartE2EDuration="3.483903409s" podCreationTimestamp="2026-01-05 23:11:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:11:21.47343216 +0000 UTC m=+4813.050929840" watchObservedRunningTime="2026-01-05 23:11:21.483903409 +0000 UTC m=+4813.061401089" Jan 05 23:11:21 crc kubenswrapper[4910]: I0105 23:11:21.722311 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:11:21 crc kubenswrapper[4910]: E0105 23:11:21.722587 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:11:22 crc kubenswrapper[4910]: I0105 23:11:22.220431 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="b4a03100-4353-4d31-815b-2ad6b4286473" containerName="rabbitmq" containerID="cri-o://5b20fb89f32b09e60147577be6c1686f361bde2ffc0f8a333b53007e87f75dde" gracePeriod=604798 Jan 05 23:11:23 crc kubenswrapper[4910]: I0105 23:11:23.019809 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="fd8c27e8-d4f8-4b4a-b588-36235407cb65" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.241:5672: connect: connection refused" Jan 05 23:11:23 crc kubenswrapper[4910]: I0105 23:11:23.385295 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="b4a03100-4353-4d31-815b-2ad6b4286473" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.242:5672: connect: connection refused" Jan 05 23:11:26 crc kubenswrapper[4910]: I0105 23:11:26.593237 4910 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:26 crc kubenswrapper[4910]: I0105 23:11:26.593699 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:26 crc kubenswrapper[4910]: I0105 23:11:26.648313 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:27 crc kubenswrapper[4910]: I0105 23:11:27.599278 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:27 crc kubenswrapper[4910]: I0105 23:11:27.708474 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6gxx7"] Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.144284 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.199615 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fd8c27e8-d4f8-4b4a-b588-36235407cb65-plugins-conf\") pod \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.199687 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-plugins\") pod \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.199726 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-erlang-cookie\") pod \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.199787 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ch22\" (UniqueName: \"kubernetes.io/projected/fd8c27e8-d4f8-4b4a-b588-36235407cb65-kube-api-access-4ch22\") pod \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.199912 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fd8c27e8-d4f8-4b4a-b588-36235407cb65-erlang-cookie-secret\") pod \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.200201 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\") pod \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.200250 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fd8c27e8-d4f8-4b4a-b588-36235407cb65-pod-info\") pod \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\" (UID: 
\"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.200275 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fd8c27e8-d4f8-4b4a-b588-36235407cb65-server-conf\") pod \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.200322 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-confd\") pod \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\" (UID: \"fd8c27e8-d4f8-4b4a-b588-36235407cb65\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.202187 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "fd8c27e8-d4f8-4b4a-b588-36235407cb65" (UID: "fd8c27e8-d4f8-4b4a-b588-36235407cb65"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.202220 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd8c27e8-d4f8-4b4a-b588-36235407cb65-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "fd8c27e8-d4f8-4b4a-b588-36235407cb65" (UID: "fd8c27e8-d4f8-4b4a-b588-36235407cb65"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.202676 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "fd8c27e8-d4f8-4b4a-b588-36235407cb65" (UID: "fd8c27e8-d4f8-4b4a-b588-36235407cb65"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.235368 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/fd8c27e8-d4f8-4b4a-b588-36235407cb65-pod-info" (OuterVolumeSpecName: "pod-info") pod "fd8c27e8-d4f8-4b4a-b588-36235407cb65" (UID: "fd8c27e8-d4f8-4b4a-b588-36235407cb65"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.240446 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd8c27e8-d4f8-4b4a-b588-36235407cb65-kube-api-access-4ch22" (OuterVolumeSpecName: "kube-api-access-4ch22") pod "fd8c27e8-d4f8-4b4a-b588-36235407cb65" (UID: "fd8c27e8-d4f8-4b4a-b588-36235407cb65"). InnerVolumeSpecName "kube-api-access-4ch22". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.240757 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd8c27e8-d4f8-4b4a-b588-36235407cb65-server-conf" (OuterVolumeSpecName: "server-conf") pod "fd8c27e8-d4f8-4b4a-b588-36235407cb65" (UID: "fd8c27e8-d4f8-4b4a-b588-36235407cb65"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.240882 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c980509f-4bcd-48d3-bedd-68cca7857d01" (OuterVolumeSpecName: "persistence") pod "fd8c27e8-d4f8-4b4a-b588-36235407cb65" (UID: "fd8c27e8-d4f8-4b4a-b588-36235407cb65"). InnerVolumeSpecName "pvc-c980509f-4bcd-48d3-bedd-68cca7857d01". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.249459 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd8c27e8-d4f8-4b4a-b588-36235407cb65-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "fd8c27e8-d4f8-4b4a-b588-36235407cb65" (UID: "fd8c27e8-d4f8-4b4a-b588-36235407cb65"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.302383 4910 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fd8c27e8-d4f8-4b4a-b588-36235407cb65-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.302459 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\") on node \"crc\" " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.302476 4910 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fd8c27e8-d4f8-4b4a-b588-36235407cb65-pod-info\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.302489 4910 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fd8c27e8-d4f8-4b4a-b588-36235407cb65-server-conf\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.302503 4910 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fd8c27e8-d4f8-4b4a-b588-36235407cb65-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.302516 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.302528 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.302542 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ch22\" (UniqueName: \"kubernetes.io/projected/fd8c27e8-d4f8-4b4a-b588-36235407cb65-kube-api-access-4ch22\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.327798 4910 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.327975 4910 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-c980509f-4bcd-48d3-bedd-68cca7857d01" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c980509f-4bcd-48d3-bedd-68cca7857d01") on node "crc" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.334610 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "fd8c27e8-d4f8-4b4a-b588-36235407cb65" (UID: "fd8c27e8-d4f8-4b4a-b588-36235407cb65"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.405258 4910 reconciler_common.go:293] "Volume detached for volume \"pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.405306 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fd8c27e8-d4f8-4b4a-b588-36235407cb65-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.526365 4910 generic.go:334] "Generic (PLEG): container finished" podID="fd8c27e8-d4f8-4b4a-b588-36235407cb65" containerID="317a2470eac1f5c10b0c0202db8da7ea29fc33a1ef38b36a67ac3856b2ecee85" exitCode=0 Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.526506 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.526469 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fd8c27e8-d4f8-4b4a-b588-36235407cb65","Type":"ContainerDied","Data":"317a2470eac1f5c10b0c0202db8da7ea29fc33a1ef38b36a67ac3856b2ecee85"} Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.527367 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fd8c27e8-d4f8-4b4a-b588-36235407cb65","Type":"ContainerDied","Data":"1979c02a32ff231970eb4edeed22d749a41dbdcfb8664ce89604e8e3560bb0b1"} Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.527418 4910 scope.go:117] "RemoveContainer" containerID="317a2470eac1f5c10b0c0202db8da7ea29fc33a1ef38b36a67ac3856b2ecee85" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.532906 4910 generic.go:334] "Generic (PLEG): container finished" podID="b4a03100-4353-4d31-815b-2ad6b4286473" containerID="5b20fb89f32b09e60147577be6c1686f361bde2ffc0f8a333b53007e87f75dde" exitCode=0 Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.533541 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b4a03100-4353-4d31-815b-2ad6b4286473","Type":"ContainerDied","Data":"5b20fb89f32b09e60147577be6c1686f361bde2ffc0f8a333b53007e87f75dde"} Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.556086 4910 scope.go:117] "RemoveContainer" containerID="35d5798419d61ba08fe86e8ff05f292512a662c7db7b41e6936875df72a721b6" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.580548 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.589495 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/rabbitmq-server-0"] Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.590060 4910 scope.go:117] "RemoveContainer" containerID="317a2470eac1f5c10b0c0202db8da7ea29fc33a1ef38b36a67ac3856b2ecee85" Jan 05 23:11:28 crc kubenswrapper[4910]: E0105 23:11:28.591853 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"317a2470eac1f5c10b0c0202db8da7ea29fc33a1ef38b36a67ac3856b2ecee85\": container with ID starting with 317a2470eac1f5c10b0c0202db8da7ea29fc33a1ef38b36a67ac3856b2ecee85 not found: ID does not exist" containerID="317a2470eac1f5c10b0c0202db8da7ea29fc33a1ef38b36a67ac3856b2ecee85" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.591899 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"317a2470eac1f5c10b0c0202db8da7ea29fc33a1ef38b36a67ac3856b2ecee85"} err="failed to get container status \"317a2470eac1f5c10b0c0202db8da7ea29fc33a1ef38b36a67ac3856b2ecee85\": rpc error: code = NotFound desc = could not find container \"317a2470eac1f5c10b0c0202db8da7ea29fc33a1ef38b36a67ac3856b2ecee85\": container with ID starting with 317a2470eac1f5c10b0c0202db8da7ea29fc33a1ef38b36a67ac3856b2ecee85 not found: ID does not exist" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.591940 4910 scope.go:117] "RemoveContainer" containerID="35d5798419d61ba08fe86e8ff05f292512a662c7db7b41e6936875df72a721b6" Jan 05 23:11:28 crc kubenswrapper[4910]: E0105 23:11:28.592522 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35d5798419d61ba08fe86e8ff05f292512a662c7db7b41e6936875df72a721b6\": container with ID starting with 35d5798419d61ba08fe86e8ff05f292512a662c7db7b41e6936875df72a721b6 not found: ID does not exist" containerID="35d5798419d61ba08fe86e8ff05f292512a662c7db7b41e6936875df72a721b6" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.592594 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35d5798419d61ba08fe86e8ff05f292512a662c7db7b41e6936875df72a721b6"} err="failed to get container status \"35d5798419d61ba08fe86e8ff05f292512a662c7db7b41e6936875df72a721b6\": rpc error: code = NotFound desc = could not find container \"35d5798419d61ba08fe86e8ff05f292512a662c7db7b41e6936875df72a721b6\": container with ID starting with 35d5798419d61ba08fe86e8ff05f292512a662c7db7b41e6936875df72a721b6 not found: ID does not exist" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.618019 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 05 23:11:28 crc kubenswrapper[4910]: E0105 23:11:28.618501 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd8c27e8-d4f8-4b4a-b588-36235407cb65" containerName="rabbitmq" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.618519 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd8c27e8-d4f8-4b4a-b588-36235407cb65" containerName="rabbitmq" Jan 05 23:11:28 crc kubenswrapper[4910]: E0105 23:11:28.618553 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd8c27e8-d4f8-4b4a-b588-36235407cb65" containerName="setup-container" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.618564 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd8c27e8-d4f8-4b4a-b588-36235407cb65" containerName="setup-container" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.618762 4910 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="fd8c27e8-d4f8-4b4a-b588-36235407cb65" containerName="rabbitmq" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.619884 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.624007 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.624085 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.624375 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-p2qqm" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.624483 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.625628 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.658367 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.712307 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.712405 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/395698cd-ffef-4d1f-959f-39c54c8b76f8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.712447 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/395698cd-ffef-4d1f-959f-39c54c8b76f8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.712482 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/395698cd-ffef-4d1f-959f-39c54c8b76f8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.712514 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kndrj\" (UniqueName: \"kubernetes.io/projected/395698cd-ffef-4d1f-959f-39c54c8b76f8-kube-api-access-kndrj\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.712544 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/395698cd-ffef-4d1f-959f-39c54c8b76f8-rabbitmq-erlang-cookie\") pod 
\"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.712581 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/395698cd-ffef-4d1f-959f-39c54c8b76f8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.712606 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/395698cd-ffef-4d1f-959f-39c54c8b76f8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.712629 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/395698cd-ffef-4d1f-959f-39c54c8b76f8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.734161 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd8c27e8-d4f8-4b4a-b588-36235407cb65" path="/var/lib/kubelet/pods/fd8c27e8-d4f8-4b4a-b588-36235407cb65/volumes" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.814472 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.814574 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/395698cd-ffef-4d1f-959f-39c54c8b76f8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.814608 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/395698cd-ffef-4d1f-959f-39c54c8b76f8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.814640 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/395698cd-ffef-4d1f-959f-39c54c8b76f8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.814671 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kndrj\" (UniqueName: \"kubernetes.io/projected/395698cd-ffef-4d1f-959f-39c54c8b76f8-kube-api-access-kndrj\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.814697 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/395698cd-ffef-4d1f-959f-39c54c8b76f8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.814732 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/395698cd-ffef-4d1f-959f-39c54c8b76f8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.814751 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/395698cd-ffef-4d1f-959f-39c54c8b76f8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.814769 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/395698cd-ffef-4d1f-959f-39c54c8b76f8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.817295 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/395698cd-ffef-4d1f-959f-39c54c8b76f8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.817390 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/395698cd-ffef-4d1f-959f-39c54c8b76f8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.818708 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/395698cd-ffef-4d1f-959f-39c54c8b76f8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.819077 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.819167 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/aa87ac3f633e281ec836a753a99a3c6b975434127cedb14ddd1639acff6aaee3/globalmount\"" pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.819066 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/395698cd-ffef-4d1f-959f-39c54c8b76f8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.821017 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/395698cd-ffef-4d1f-959f-39c54c8b76f8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.821566 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/395698cd-ffef-4d1f-959f-39c54c8b76f8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.826257 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/395698cd-ffef-4d1f-959f-39c54c8b76f8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.835901 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kndrj\" (UniqueName: \"kubernetes.io/projected/395698cd-ffef-4d1f-959f-39c54c8b76f8-kube-api-access-kndrj\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.853224 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c980509f-4bcd-48d3-bedd-68cca7857d01\") pod \"rabbitmq-server-0\" (UID: \"395698cd-ffef-4d1f-959f-39c54c8b76f8\") " pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.864253 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.879063 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.916262 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\") pod \"b4a03100-4353-4d31-815b-2ad6b4286473\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.916325 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-erlang-cookie\") pod \"b4a03100-4353-4d31-815b-2ad6b4286473\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.916393 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b4a03100-4353-4d31-815b-2ad6b4286473-plugins-conf\") pod \"b4a03100-4353-4d31-815b-2ad6b4286473\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.916458 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-confd\") pod \"b4a03100-4353-4d31-815b-2ad6b4286473\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.916537 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bjdhf\" (UniqueName: \"kubernetes.io/projected/b4a03100-4353-4d31-815b-2ad6b4286473-kube-api-access-bjdhf\") pod \"b4a03100-4353-4d31-815b-2ad6b4286473\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.916630 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-plugins\") pod \"b4a03100-4353-4d31-815b-2ad6b4286473\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.916681 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b4a03100-4353-4d31-815b-2ad6b4286473-erlang-cookie-secret\") pod \"b4a03100-4353-4d31-815b-2ad6b4286473\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.916783 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b4a03100-4353-4d31-815b-2ad6b4286473-server-conf\") pod \"b4a03100-4353-4d31-815b-2ad6b4286473\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.916807 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b4a03100-4353-4d31-815b-2ad6b4286473-pod-info\") pod \"b4a03100-4353-4d31-815b-2ad6b4286473\" (UID: \"b4a03100-4353-4d31-815b-2ad6b4286473\") " Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.917293 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-erlang-cookie" (OuterVolumeSpecName: 
"rabbitmq-erlang-cookie") pod "b4a03100-4353-4d31-815b-2ad6b4286473" (UID: "b4a03100-4353-4d31-815b-2ad6b4286473"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.917424 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.918492 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "b4a03100-4353-4d31-815b-2ad6b4286473" (UID: "b4a03100-4353-4d31-815b-2ad6b4286473"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.920661 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4a03100-4353-4d31-815b-2ad6b4286473-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "b4a03100-4353-4d31-815b-2ad6b4286473" (UID: "b4a03100-4353-4d31-815b-2ad6b4286473"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.951274 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/b4a03100-4353-4d31-815b-2ad6b4286473-pod-info" (OuterVolumeSpecName: "pod-info") pod "b4a03100-4353-4d31-815b-2ad6b4286473" (UID: "b4a03100-4353-4d31-815b-2ad6b4286473"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.953107 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4a03100-4353-4d31-815b-2ad6b4286473-kube-api-access-bjdhf" (OuterVolumeSpecName: "kube-api-access-bjdhf") pod "b4a03100-4353-4d31-815b-2ad6b4286473" (UID: "b4a03100-4353-4d31-815b-2ad6b4286473"). InnerVolumeSpecName "kube-api-access-bjdhf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.957313 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4a03100-4353-4d31-815b-2ad6b4286473-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "b4a03100-4353-4d31-815b-2ad6b4286473" (UID: "b4a03100-4353-4d31-815b-2ad6b4286473"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.980908 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-s7pxp"] Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.981291 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" podUID="94042c36-e8dd-4a95-9288-e2b3c14d16ed" containerName="dnsmasq-dns" containerID="cri-o://4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2" gracePeriod=10 Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.994466 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.994540 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4a03100-4353-4d31-815b-2ad6b4286473-server-conf" (OuterVolumeSpecName: "server-conf") pod "b4a03100-4353-4d31-815b-2ad6b4286473" (UID: "b4a03100-4353-4d31-815b-2ad6b4286473"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:11:28 crc kubenswrapper[4910]: I0105 23:11:28.994568 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0be5cbae-d82d-4bda-9521-eac234c98e49" (OuterVolumeSpecName: "persistence") pod "b4a03100-4353-4d31-815b-2ad6b4286473" (UID: "b4a03100-4353-4d31-815b-2ad6b4286473"). InnerVolumeSpecName "pvc-0be5cbae-d82d-4bda-9521-eac234c98e49". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.021362 4910 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b4a03100-4353-4d31-815b-2ad6b4286473-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.021424 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bjdhf\" (UniqueName: \"kubernetes.io/projected/b4a03100-4353-4d31-815b-2ad6b4286473-kube-api-access-bjdhf\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.021439 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.021455 4910 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b4a03100-4353-4d31-815b-2ad6b4286473-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.021467 4910 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b4a03100-4353-4d31-815b-2ad6b4286473-pod-info\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.021483 4910 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b4a03100-4353-4d31-815b-2ad6b4286473-server-conf\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.021522 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\") on node \"crc\" " Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.060491 4910 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.061569 4910 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-0be5cbae-d82d-4bda-9521-eac234c98e49" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0be5cbae-d82d-4bda-9521-eac234c98e49") on node "crc" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.093410 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "b4a03100-4353-4d31-815b-2ad6b4286473" (UID: "b4a03100-4353-4d31-815b-2ad6b4286473"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.122592 4910 reconciler_common.go:293] "Volume detached for volume \"pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.122632 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b4a03100-4353-4d31-815b-2ad6b4286473-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:29 crc kubenswrapper[4910]: E0105 23:11:29.240375 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod94042c36_e8dd_4a95_9288_e2b3c14d16ed.slice/crio-conmon-4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2.scope\": RecentStats: unable to find data in memory cache]" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.452328 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.529203 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79464\" (UniqueName: \"kubernetes.io/projected/94042c36-e8dd-4a95-9288-e2b3c14d16ed-kube-api-access-79464\") pod \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\" (UID: \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\") " Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.529839 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94042c36-e8dd-4a95-9288-e2b3c14d16ed-dns-svc\") pod \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\" (UID: \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\") " Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.529903 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94042c36-e8dd-4a95-9288-e2b3c14d16ed-config\") pod \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\" (UID: \"94042c36-e8dd-4a95-9288-e2b3c14d16ed\") " Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.538005 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94042c36-e8dd-4a95-9288-e2b3c14d16ed-kube-api-access-79464" (OuterVolumeSpecName: "kube-api-access-79464") pod "94042c36-e8dd-4a95-9288-e2b3c14d16ed" (UID: "94042c36-e8dd-4a95-9288-e2b3c14d16ed"). InnerVolumeSpecName "kube-api-access-79464". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.540379 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.561801 4910 generic.go:334] "Generic (PLEG): container finished" podID="94042c36-e8dd-4a95-9288-e2b3c14d16ed" containerID="4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2" exitCode=0 Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.562023 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.562136 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" event={"ID":"94042c36-e8dd-4a95-9288-e2b3c14d16ed","Type":"ContainerDied","Data":"4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2"} Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.562217 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-s7pxp" event={"ID":"94042c36-e8dd-4a95-9288-e2b3c14d16ed","Type":"ContainerDied","Data":"d1a3791c1193f145fce1a1ef7e66512776e479e4c51a210ea9b8c1c35460d37d"} Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.562253 4910 scope.go:117] "RemoveContainer" containerID="4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.566100 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.566264 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6gxx7" podUID="1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" containerName="registry-server" containerID="cri-o://28bc7eff89bc6a90b8d7212536e4bb5237ab6d8177a54c919e639fe0502120c0" gracePeriod=2 Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.566395 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b4a03100-4353-4d31-815b-2ad6b4286473","Type":"ContainerDied","Data":"73b8fa22e0bf3e1a0cbcf0f619cfafb438bd69add552483dd8d3ac05f537c5df"} Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.583541 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94042c36-e8dd-4a95-9288-e2b3c14d16ed-config" (OuterVolumeSpecName: "config") pod "94042c36-e8dd-4a95-9288-e2b3c14d16ed" (UID: "94042c36-e8dd-4a95-9288-e2b3c14d16ed"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.589101 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94042c36-e8dd-4a95-9288-e2b3c14d16ed-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "94042c36-e8dd-4a95-9288-e2b3c14d16ed" (UID: "94042c36-e8dd-4a95-9288-e2b3c14d16ed"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.620815 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.624722 4910 scope.go:117] "RemoveContainer" containerID="4f14487b5b9356e354dc3fd424cb3bd2c60c03cc37ea66bf815b4a1fc000b3fe" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.630040 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.631814 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79464\" (UniqueName: \"kubernetes.io/projected/94042c36-e8dd-4a95-9288-e2b3c14d16ed-kube-api-access-79464\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.631852 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94042c36-e8dd-4a95-9288-e2b3c14d16ed-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.631866 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94042c36-e8dd-4a95-9288-e2b3c14d16ed-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.654969 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 05 23:11:29 crc kubenswrapper[4910]: E0105 23:11:29.655386 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94042c36-e8dd-4a95-9288-e2b3c14d16ed" containerName="init" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.655402 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="94042c36-e8dd-4a95-9288-e2b3c14d16ed" containerName="init" Jan 05 23:11:29 crc kubenswrapper[4910]: E0105 23:11:29.655420 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94042c36-e8dd-4a95-9288-e2b3c14d16ed" containerName="dnsmasq-dns" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.655428 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="94042c36-e8dd-4a95-9288-e2b3c14d16ed" containerName="dnsmasq-dns" Jan 05 23:11:29 crc kubenswrapper[4910]: E0105 23:11:29.655437 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4a03100-4353-4d31-815b-2ad6b4286473" containerName="rabbitmq" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.655444 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4a03100-4353-4d31-815b-2ad6b4286473" containerName="rabbitmq" Jan 05 23:11:29 crc kubenswrapper[4910]: E0105 23:11:29.655460 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4a03100-4353-4d31-815b-2ad6b4286473" containerName="setup-container" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.655466 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4a03100-4353-4d31-815b-2ad6b4286473" containerName="setup-container" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.655629 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4a03100-4353-4d31-815b-2ad6b4286473" containerName="rabbitmq" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.655642 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="94042c36-e8dd-4a95-9288-e2b3c14d16ed" containerName="dnsmasq-dns" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.656619 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.660583 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.660944 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.661107 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-swqv9" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.671356 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.671512 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.677242 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.734589 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.734964 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.735207 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.735489 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.735670 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.735779 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 
crc kubenswrapper[4910]: I0105 23:11:29.735888 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.735995 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.736275 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msglp\" (UniqueName: \"kubernetes.io/projected/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-kube-api-access-msglp\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.760789 4910 scope.go:117] "RemoveContainer" containerID="4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2" Jan 05 23:11:29 crc kubenswrapper[4910]: E0105 23:11:29.761469 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2\": container with ID starting with 4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2 not found: ID does not exist" containerID="4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.761506 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2"} err="failed to get container status \"4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2\": rpc error: code = NotFound desc = could not find container \"4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2\": container with ID starting with 4bc772907dbefe979a27f979c62bf54a767d17c631c4875d7846359b62ea60e2 not found: ID does not exist" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.761536 4910 scope.go:117] "RemoveContainer" containerID="4f14487b5b9356e354dc3fd424cb3bd2c60c03cc37ea66bf815b4a1fc000b3fe" Jan 05 23:11:29 crc kubenswrapper[4910]: E0105 23:11:29.761806 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f14487b5b9356e354dc3fd424cb3bd2c60c03cc37ea66bf815b4a1fc000b3fe\": container with ID starting with 4f14487b5b9356e354dc3fd424cb3bd2c60c03cc37ea66bf815b4a1fc000b3fe not found: ID does not exist" containerID="4f14487b5b9356e354dc3fd424cb3bd2c60c03cc37ea66bf815b4a1fc000b3fe" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.761828 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f14487b5b9356e354dc3fd424cb3bd2c60c03cc37ea66bf815b4a1fc000b3fe"} err="failed to get container status \"4f14487b5b9356e354dc3fd424cb3bd2c60c03cc37ea66bf815b4a1fc000b3fe\": rpc error: code = NotFound desc = could not find container \"4f14487b5b9356e354dc3fd424cb3bd2c60c03cc37ea66bf815b4a1fc000b3fe\": container 
with ID starting with 4f14487b5b9356e354dc3fd424cb3bd2c60c03cc37ea66bf815b4a1fc000b3fe not found: ID does not exist" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.761843 4910 scope.go:117] "RemoveContainer" containerID="5b20fb89f32b09e60147577be6c1686f361bde2ffc0f8a333b53007e87f75dde" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.815513 4910 scope.go:117] "RemoveContainer" containerID="801ecf70653461d6fb46b27058a2887a58241bacb58255ab7804988977d080cf" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.837508 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.837565 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.837591 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.837624 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msglp\" (UniqueName: \"kubernetes.io/projected/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-kube-api-access-msglp\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.837673 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.837695 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.837716 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.837759 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " 
pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.837786 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.839209 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.840074 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.840193 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.840678 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.843540 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.844160 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.844215 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.844248 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1e4b04c69fd3a5251ce0da75b7a5ef8d4c0347e55c8b081209752616552e3bbf/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.846863 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.859192 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msglp\" (UniqueName: \"kubernetes.io/projected/2314902d-61c8-428f-b2f7-b47e7a9c9d2e-kube-api-access-msglp\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.884549 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0be5cbae-d82d-4bda-9521-eac234c98e49\") pod \"rabbitmq-cell1-server-0\" (UID: \"2314902d-61c8-428f-b2f7-b47e7a9c9d2e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.912717 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.923912 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-s7pxp"] Jan 05 23:11:29 crc kubenswrapper[4910]: I0105 23:11:29.931051 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-s7pxp"] Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.041371 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pmwr5\" (UniqueName: \"kubernetes.io/projected/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-kube-api-access-pmwr5\") pod \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\" (UID: \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\") " Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.041560 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-catalog-content\") pod \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\" (UID: \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\") " Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.041612 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-utilities\") pod \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\" (UID: \"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8\") " Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.043881 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-utilities" (OuterVolumeSpecName: "utilities") pod "1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" (UID: "1d883f70-994d-4c7d-9cb3-04f3a29d2ff8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.047495 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-kube-api-access-pmwr5" (OuterVolumeSpecName: "kube-api-access-pmwr5") pod "1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" (UID: "1d883f70-994d-4c7d-9cb3-04f3a29d2ff8"). InnerVolumeSpecName "kube-api-access-pmwr5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.067503 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" (UID: "1d883f70-994d-4c7d-9cb3-04f3a29d2ff8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.139236 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.143239 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.143278 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.143289 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pmwr5\" (UniqueName: \"kubernetes.io/projected/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8-kube-api-access-pmwr5\") on node \"crc\" DevicePath \"\"" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.589716 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"395698cd-ffef-4d1f-959f-39c54c8b76f8","Type":"ContainerStarted","Data":"f8f4b6a12a944bf3f50739626bdcbe8fc46972029175588da197606465d038e3"} Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.592797 4910 generic.go:334] "Generic (PLEG): container finished" podID="1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" containerID="28bc7eff89bc6a90b8d7212536e4bb5237ab6d8177a54c919e639fe0502120c0" exitCode=0 Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.592905 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6gxx7" event={"ID":"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8","Type":"ContainerDied","Data":"28bc7eff89bc6a90b8d7212536e4bb5237ab6d8177a54c919e639fe0502120c0"} Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.592997 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6gxx7" event={"ID":"1d883f70-994d-4c7d-9cb3-04f3a29d2ff8","Type":"ContainerDied","Data":"c890ea1070d8bedc7ff5c20be3ad04b9b373b7c80b5f7c1d2791033cc545e540"} Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.592936 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6gxx7" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.593035 4910 scope.go:117] "RemoveContainer" containerID="28bc7eff89bc6a90b8d7212536e4bb5237ab6d8177a54c919e639fe0502120c0" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.625939 4910 scope.go:117] "RemoveContainer" containerID="b3efa5551bc2c18498e983f2d8bbe94cdb1936cf28af0ba435ef8f8122b44100" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.648676 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6gxx7"] Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.658323 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6gxx7"] Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.674611 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.742251 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" path="/var/lib/kubelet/pods/1d883f70-994d-4c7d-9cb3-04f3a29d2ff8/volumes" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.744020 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94042c36-e8dd-4a95-9288-e2b3c14d16ed" path="/var/lib/kubelet/pods/94042c36-e8dd-4a95-9288-e2b3c14d16ed/volumes" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.747584 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4a03100-4353-4d31-815b-2ad6b4286473" path="/var/lib/kubelet/pods/b4a03100-4353-4d31-815b-2ad6b4286473/volumes" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.811261 4910 scope.go:117] "RemoveContainer" containerID="05a53dd0096f6d23cdf17464689d9405476246726de7cb28c7371301aaae06cb" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.965292 4910 scope.go:117] "RemoveContainer" containerID="28bc7eff89bc6a90b8d7212536e4bb5237ab6d8177a54c919e639fe0502120c0" Jan 05 23:11:30 crc kubenswrapper[4910]: E0105 23:11:30.966374 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28bc7eff89bc6a90b8d7212536e4bb5237ab6d8177a54c919e639fe0502120c0\": container with ID starting with 28bc7eff89bc6a90b8d7212536e4bb5237ab6d8177a54c919e639fe0502120c0 not found: ID does not exist" containerID="28bc7eff89bc6a90b8d7212536e4bb5237ab6d8177a54c919e639fe0502120c0" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.966459 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28bc7eff89bc6a90b8d7212536e4bb5237ab6d8177a54c919e639fe0502120c0"} err="failed to get container status \"28bc7eff89bc6a90b8d7212536e4bb5237ab6d8177a54c919e639fe0502120c0\": rpc error: code = NotFound desc = could not find container \"28bc7eff89bc6a90b8d7212536e4bb5237ab6d8177a54c919e639fe0502120c0\": container with ID starting with 28bc7eff89bc6a90b8d7212536e4bb5237ab6d8177a54c919e639fe0502120c0 not found: ID does not exist" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.966503 4910 scope.go:117] "RemoveContainer" containerID="b3efa5551bc2c18498e983f2d8bbe94cdb1936cf28af0ba435ef8f8122b44100" Jan 05 23:11:30 crc kubenswrapper[4910]: E0105 23:11:30.967316 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3efa5551bc2c18498e983f2d8bbe94cdb1936cf28af0ba435ef8f8122b44100\": container 
with ID starting with b3efa5551bc2c18498e983f2d8bbe94cdb1936cf28af0ba435ef8f8122b44100 not found: ID does not exist" containerID="b3efa5551bc2c18498e983f2d8bbe94cdb1936cf28af0ba435ef8f8122b44100" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.967406 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3efa5551bc2c18498e983f2d8bbe94cdb1936cf28af0ba435ef8f8122b44100"} err="failed to get container status \"b3efa5551bc2c18498e983f2d8bbe94cdb1936cf28af0ba435ef8f8122b44100\": rpc error: code = NotFound desc = could not find container \"b3efa5551bc2c18498e983f2d8bbe94cdb1936cf28af0ba435ef8f8122b44100\": container with ID starting with b3efa5551bc2c18498e983f2d8bbe94cdb1936cf28af0ba435ef8f8122b44100 not found: ID does not exist" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.967443 4910 scope.go:117] "RemoveContainer" containerID="05a53dd0096f6d23cdf17464689d9405476246726de7cb28c7371301aaae06cb" Jan 05 23:11:30 crc kubenswrapper[4910]: E0105 23:11:30.967931 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05a53dd0096f6d23cdf17464689d9405476246726de7cb28c7371301aaae06cb\": container with ID starting with 05a53dd0096f6d23cdf17464689d9405476246726de7cb28c7371301aaae06cb not found: ID does not exist" containerID="05a53dd0096f6d23cdf17464689d9405476246726de7cb28c7371301aaae06cb" Jan 05 23:11:30 crc kubenswrapper[4910]: I0105 23:11:30.967961 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05a53dd0096f6d23cdf17464689d9405476246726de7cb28c7371301aaae06cb"} err="failed to get container status \"05a53dd0096f6d23cdf17464689d9405476246726de7cb28c7371301aaae06cb\": rpc error: code = NotFound desc = could not find container \"05a53dd0096f6d23cdf17464689d9405476246726de7cb28c7371301aaae06cb\": container with ID starting with 05a53dd0096f6d23cdf17464689d9405476246726de7cb28c7371301aaae06cb not found: ID does not exist" Jan 05 23:11:31 crc kubenswrapper[4910]: I0105 23:11:31.604637 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2314902d-61c8-428f-b2f7-b47e7a9c9d2e","Type":"ContainerStarted","Data":"d950a00ce3feb333c815c617054ad32de865e1762451b0eb222c57671b174798"} Jan 05 23:11:32 crc kubenswrapper[4910]: I0105 23:11:32.621213 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"395698cd-ffef-4d1f-959f-39c54c8b76f8","Type":"ContainerStarted","Data":"f6961837251e61f8bd83bdbb4969133b565a19cbd581d3f186403a42860a9295"} Jan 05 23:11:33 crc kubenswrapper[4910]: I0105 23:11:33.636152 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2314902d-61c8-428f-b2f7-b47e7a9c9d2e","Type":"ContainerStarted","Data":"94596cffceb95998739304b645287a45dc457093b8219a611315448b2fba0c9f"} Jan 05 23:11:34 crc kubenswrapper[4910]: I0105 23:11:34.721924 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:11:34 crc kubenswrapper[4910]: E0105 23:11:34.722442 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:11:46 crc kubenswrapper[4910]: I0105 23:11:46.722328 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:11:46 crc kubenswrapper[4910]: E0105 23:11:46.723405 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:12:00 crc kubenswrapper[4910]: I0105 23:12:00.722429 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:12:00 crc kubenswrapper[4910]: E0105 23:12:00.723479 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:12:05 crc kubenswrapper[4910]: I0105 23:12:05.962490 4910 generic.go:334] "Generic (PLEG): container finished" podID="395698cd-ffef-4d1f-959f-39c54c8b76f8" containerID="f6961837251e61f8bd83bdbb4969133b565a19cbd581d3f186403a42860a9295" exitCode=0 Jan 05 23:12:05 crc kubenswrapper[4910]: I0105 23:12:05.962646 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"395698cd-ffef-4d1f-959f-39c54c8b76f8","Type":"ContainerDied","Data":"f6961837251e61f8bd83bdbb4969133b565a19cbd581d3f186403a42860a9295"} Jan 05 23:12:06 crc kubenswrapper[4910]: I0105 23:12:06.983535 4910 generic.go:334] "Generic (PLEG): container finished" podID="2314902d-61c8-428f-b2f7-b47e7a9c9d2e" containerID="94596cffceb95998739304b645287a45dc457093b8219a611315448b2fba0c9f" exitCode=0 Jan 05 23:12:06 crc kubenswrapper[4910]: I0105 23:12:06.983634 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2314902d-61c8-428f-b2f7-b47e7a9c9d2e","Type":"ContainerDied","Data":"94596cffceb95998739304b645287a45dc457093b8219a611315448b2fba0c9f"} Jan 05 23:12:06 crc kubenswrapper[4910]: I0105 23:12:06.994315 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"395698cd-ffef-4d1f-959f-39c54c8b76f8","Type":"ContainerStarted","Data":"dc37e969ab81222a345d26b2afbcd158313ba08218d9a13de09bbf213eadc151"} Jan 05 23:12:06 crc kubenswrapper[4910]: I0105 23:12:06.994995 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 05 23:12:07 crc kubenswrapper[4910]: I0105 23:12:07.056538 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=39.056522499 podStartE2EDuration="39.056522499s" podCreationTimestamp="2026-01-05 23:11:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:12:07.049940376 +0000 UTC m=+4858.627438046" 
watchObservedRunningTime="2026-01-05 23:12:07.056522499 +0000 UTC m=+4858.634020159" Jan 05 23:12:08 crc kubenswrapper[4910]: I0105 23:12:08.010381 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2314902d-61c8-428f-b2f7-b47e7a9c9d2e","Type":"ContainerStarted","Data":"f8df13ca66824d93e84a5b42ed6be533d352c7501b2671615b262965fdb859ac"} Jan 05 23:12:08 crc kubenswrapper[4910]: I0105 23:12:08.011381 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:12:08 crc kubenswrapper[4910]: I0105 23:12:08.053252 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=39.053220952 podStartE2EDuration="39.053220952s" podCreationTimestamp="2026-01-05 23:11:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:12:08.048696131 +0000 UTC m=+4859.626193811" watchObservedRunningTime="2026-01-05 23:12:08.053220952 +0000 UTC m=+4859.630718662" Jan 05 23:12:14 crc kubenswrapper[4910]: I0105 23:12:14.721407 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:12:15 crc kubenswrapper[4910]: I0105 23:12:15.088875 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"1b4947f16488761156b000cbce3970d8b169fa16ff0cca2579226d719a03df0b"} Jan 05 23:12:18 crc kubenswrapper[4910]: I0105 23:12:18.998407 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 05 23:12:20 crc kubenswrapper[4910]: I0105 23:12:20.143049 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 05 23:12:26 crc kubenswrapper[4910]: I0105 23:12:26.419880 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1-default"] Jan 05 23:12:26 crc kubenswrapper[4910]: E0105 23:12:26.420959 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" containerName="registry-server" Jan 05 23:12:26 crc kubenswrapper[4910]: I0105 23:12:26.420978 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" containerName="registry-server" Jan 05 23:12:26 crc kubenswrapper[4910]: E0105 23:12:26.420993 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" containerName="extract-content" Jan 05 23:12:26 crc kubenswrapper[4910]: I0105 23:12:26.421001 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" containerName="extract-content" Jan 05 23:12:26 crc kubenswrapper[4910]: E0105 23:12:26.421033 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" containerName="extract-utilities" Jan 05 23:12:26 crc kubenswrapper[4910]: I0105 23:12:26.421045 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" containerName="extract-utilities" Jan 05 23:12:26 crc kubenswrapper[4910]: I0105 23:12:26.421260 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d883f70-994d-4c7d-9cb3-04f3a29d2ff8" containerName="registry-server" 
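
The three "Error syncing pod, skipping ... CrashLoopBackOff: back-off 5m0s" entries above (23:11:34, 23:11:46, 23:12:00), followed by a successful ContainerStarted at 23:12:15, show the kubelet's restart gate: the pod worker keeps re-syncing every few seconds, but starting the container is refused until the back-off window since the last crash has elapsed. A minimal sketch of such a capped, doubling gate, assuming a 10s base and 2x growth (only the 5m cap is visible in this log):

```go
package main

import (
	"fmt"
	"time"
)

// backoffGate models the gate behind the "CrashLoopBackOff: back-off
// 5m0s" entries above. The 10s base and the doubling are assumptions
// for illustration; only the 5m cap appears in this log.
type backoffGate struct {
	base, max time.Duration
	failures  int
	lastFail  time.Time
}

// delay is the wait required after the last crash before another start.
func (g *backoffGate) delay() time.Duration {
	d := g.base
	for i := 0; i < g.failures && d < g.max; i++ {
		d *= 2
	}
	if d > g.max {
		d = g.max
	}
	return d
}

// tryStart reports whether a restart is allowed yet; while it returns
// false, a sync attempt surfaces as "Error syncing pod, skipping".
func (g *backoffGate) tryStart(now time.Time) bool {
	return now.Sub(g.lastFail) >= g.delay()
}

// fail records another crash, doubling the next delay up to the cap.
func (g *backoffGate) fail(now time.Time) {
	g.failures++
	g.lastFail = now
}

func main() {
	g := &backoffGate{base: 10 * time.Second, max: 5 * time.Minute}
	start := time.Now()
	g.fail(start) // container crashed once; the next delay is 20s
	for _, offset := range []time.Duration{12 * time.Second, 26 * time.Second} {
		fmt.Printf("t+%s: allowed=%v (delay %s)\n",
			offset, g.tryStart(start.Add(offset)), g.delay())
	}
}
```

The 12-14s spacing of the refused attempts above comes from the pod worker's own retry cadence, not from the back-off itself; the gate only decides whether the start may proceed, which is why the restart finally goes through at 23:12:14, once the 5m window since the last crash had expired.
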
Jan 05 23:12:26 crc kubenswrapper[4910]: I0105 23:12:26.421917 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default" Jan 05 23:12:26 crc kubenswrapper[4910]: I0105 23:12:26.424578 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-kn5kc" Jan 05 23:12:26 crc kubenswrapper[4910]: I0105 23:12:26.432289 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"] Jan 05 23:12:26 crc kubenswrapper[4910]: I0105 23:12:26.499917 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48gtj\" (UniqueName: \"kubernetes.io/projected/dd90fedf-8adf-4422-bc21-f4748a71d19d-kube-api-access-48gtj\") pod \"mariadb-client-1-default\" (UID: \"dd90fedf-8adf-4422-bc21-f4748a71d19d\") " pod="openstack/mariadb-client-1-default" Jan 05 23:12:26 crc kubenswrapper[4910]: I0105 23:12:26.602567 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48gtj\" (UniqueName: \"kubernetes.io/projected/dd90fedf-8adf-4422-bc21-f4748a71d19d-kube-api-access-48gtj\") pod \"mariadb-client-1-default\" (UID: \"dd90fedf-8adf-4422-bc21-f4748a71d19d\") " pod="openstack/mariadb-client-1-default" Jan 05 23:12:26 crc kubenswrapper[4910]: I0105 23:12:26.638101 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48gtj\" (UniqueName: \"kubernetes.io/projected/dd90fedf-8adf-4422-bc21-f4748a71d19d-kube-api-access-48gtj\") pod \"mariadb-client-1-default\" (UID: \"dd90fedf-8adf-4422-bc21-f4748a71d19d\") " pod="openstack/mariadb-client-1-default" Jan 05 23:12:26 crc kubenswrapper[4910]: I0105 23:12:26.812796 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1-default" Jan 05 23:12:27 crc kubenswrapper[4910]: I0105 23:12:27.347043 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"] Jan 05 23:12:27 crc kubenswrapper[4910]: W0105 23:12:27.359354 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddd90fedf_8adf_4422_bc21_f4748a71d19d.slice/crio-cad21dfa94e8cc97e8d9f6f29d1d09e2d55fbacd42626c0fbc7cff2ee1adafda WatchSource:0}: Error finding container cad21dfa94e8cc97e8d9f6f29d1d09e2d55fbacd42626c0fbc7cff2ee1adafda: Status 404 returned error can't find the container with id cad21dfa94e8cc97e8d9f6f29d1d09e2d55fbacd42626c0fbc7cff2ee1adafda Jan 05 23:12:27 crc kubenswrapper[4910]: I0105 23:12:27.363273 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 23:12:28 crc kubenswrapper[4910]: I0105 23:12:28.208845 4910 generic.go:334] "Generic (PLEG): container finished" podID="dd90fedf-8adf-4422-bc21-f4748a71d19d" containerID="86fd8c561118e166c30daa69f63d8fd4a7976f8c36857959ce87258c109c1c6c" exitCode=0 Jan 05 23:12:28 crc kubenswrapper[4910]: I0105 23:12:28.208917 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"dd90fedf-8adf-4422-bc21-f4748a71d19d","Type":"ContainerDied","Data":"86fd8c561118e166c30daa69f63d8fd4a7976f8c36857959ce87258c109c1c6c"} Jan 05 23:12:28 crc kubenswrapper[4910]: I0105 23:12:28.209240 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"dd90fedf-8adf-4422-bc21-f4748a71d19d","Type":"ContainerStarted","Data":"cad21dfa94e8cc97e8d9f6f29d1d09e2d55fbacd42626c0fbc7cff2ee1adafda"} Jan 05 23:12:29 crc kubenswrapper[4910]: I0105 23:12:29.636563 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default" Jan 05 23:12:29 crc kubenswrapper[4910]: I0105 23:12:29.668517 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1-default_dd90fedf-8adf-4422-bc21-f4748a71d19d/mariadb-client-1-default/0.log" Jan 05 23:12:29 crc kubenswrapper[4910]: I0105 23:12:29.714192 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1-default"] Jan 05 23:12:29 crc kubenswrapper[4910]: I0105 23:12:29.719475 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1-default"] Jan 05 23:12:29 crc kubenswrapper[4910]: I0105 23:12:29.762747 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-48gtj\" (UniqueName: \"kubernetes.io/projected/dd90fedf-8adf-4422-bc21-f4748a71d19d-kube-api-access-48gtj\") pod \"dd90fedf-8adf-4422-bc21-f4748a71d19d\" (UID: \"dd90fedf-8adf-4422-bc21-f4748a71d19d\") " Jan 05 23:12:29 crc kubenswrapper[4910]: I0105 23:12:29.770753 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd90fedf-8adf-4422-bc21-f4748a71d19d-kube-api-access-48gtj" (OuterVolumeSpecName: "kube-api-access-48gtj") pod "dd90fedf-8adf-4422-bc21-f4748a71d19d" (UID: "dd90fedf-8adf-4422-bc21-f4748a71d19d"). InnerVolumeSpecName "kube-api-access-48gtj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:12:29 crc kubenswrapper[4910]: I0105 23:12:29.864708 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-48gtj\" (UniqueName: \"kubernetes.io/projected/dd90fedf-8adf-4422-bc21-f4748a71d19d-kube-api-access-48gtj\") on node \"crc\" DevicePath \"\"" Jan 05 23:12:30 crc kubenswrapper[4910]: I0105 23:12:30.212116 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2-default"] Jan 05 23:12:30 crc kubenswrapper[4910]: E0105 23:12:30.213313 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd90fedf-8adf-4422-bc21-f4748a71d19d" containerName="mariadb-client-1-default" Jan 05 23:12:30 crc kubenswrapper[4910]: I0105 23:12:30.213358 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd90fedf-8adf-4422-bc21-f4748a71d19d" containerName="mariadb-client-1-default" Jan 05 23:12:30 crc kubenswrapper[4910]: I0105 23:12:30.213684 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd90fedf-8adf-4422-bc21-f4748a71d19d" containerName="mariadb-client-1-default" Jan 05 23:12:30 crc kubenswrapper[4910]: I0105 23:12:30.214719 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Jan 05 23:12:30 crc kubenswrapper[4910]: I0105 23:12:30.231691 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Jan 05 23:12:30 crc kubenswrapper[4910]: I0105 23:12:30.246230 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cad21dfa94e8cc97e8d9f6f29d1d09e2d55fbacd42626c0fbc7cff2ee1adafda" Jan 05 23:12:30 crc kubenswrapper[4910]: I0105 23:12:30.246385 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default" Jan 05 23:12:30 crc kubenswrapper[4910]: I0105 23:12:30.375940 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l44z4\" (UniqueName: \"kubernetes.io/projected/10864a95-2f74-47f9-b63b-4b3a751176e3-kube-api-access-l44z4\") pod \"mariadb-client-2-default\" (UID: \"10864a95-2f74-47f9-b63b-4b3a751176e3\") " pod="openstack/mariadb-client-2-default" Jan 05 23:12:30 crc kubenswrapper[4910]: I0105 23:12:30.480209 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l44z4\" (UniqueName: \"kubernetes.io/projected/10864a95-2f74-47f9-b63b-4b3a751176e3-kube-api-access-l44z4\") pod \"mariadb-client-2-default\" (UID: \"10864a95-2f74-47f9-b63b-4b3a751176e3\") " pod="openstack/mariadb-client-2-default" Jan 05 23:12:30 crc kubenswrapper[4910]: I0105 23:12:30.504970 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l44z4\" (UniqueName: \"kubernetes.io/projected/10864a95-2f74-47f9-b63b-4b3a751176e3-kube-api-access-l44z4\") pod \"mariadb-client-2-default\" (UID: \"10864a95-2f74-47f9-b63b-4b3a751176e3\") " pod="openstack/mariadb-client-2-default" Jan 05 23:12:30 crc kubenswrapper[4910]: I0105 23:12:30.549754 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2-default" Jan 05 23:12:30 crc kubenswrapper[4910]: I0105 23:12:30.736974 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd90fedf-8adf-4422-bc21-f4748a71d19d" path="/var/lib/kubelet/pods/dd90fedf-8adf-4422-bc21-f4748a71d19d/volumes" Jan 05 23:12:30 crc kubenswrapper[4910]: I0105 23:12:30.860225 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Jan 05 23:12:30 crc kubenswrapper[4910]: W0105 23:12:30.870995 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10864a95_2f74_47f9_b63b_4b3a751176e3.slice/crio-43ee2fd4b9aa742871b55e596c478892d03d9839ee29ce0e40b1586ede5e5790 WatchSource:0}: Error finding container 43ee2fd4b9aa742871b55e596c478892d03d9839ee29ce0e40b1586ede5e5790: Status 404 returned error can't find the container with id 43ee2fd4b9aa742871b55e596c478892d03d9839ee29ce0e40b1586ede5e5790 Jan 05 23:12:31 crc kubenswrapper[4910]: I0105 23:12:31.253994 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"10864a95-2f74-47f9-b63b-4b3a751176e3","Type":"ContainerStarted","Data":"9f3a450307ac313de983e69c9b42a5c0299d6d1a6a49278dda0c039ecaee21b1"} Jan 05 23:12:31 crc kubenswrapper[4910]: I0105 23:12:31.254487 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"10864a95-2f74-47f9-b63b-4b3a751176e3","Type":"ContainerStarted","Data":"43ee2fd4b9aa742871b55e596c478892d03d9839ee29ce0e40b1586ede5e5790"} Jan 05 23:12:31 crc kubenswrapper[4910]: I0105 23:12:31.272931 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-2-default" podStartSLOduration=1.272903245 podStartE2EDuration="1.272903245s" podCreationTimestamp="2026-01-05 23:12:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:12:31.271931331 +0000 UTC m=+4882.849429011" watchObservedRunningTime="2026-01-05 23:12:31.272903245 +0000 UTC m=+4882.850400955" Jan 05 23:12:32 crc kubenswrapper[4910]: I0105 23:12:32.268606 4910 generic.go:334] "Generic (PLEG): container finished" podID="10864a95-2f74-47f9-b63b-4b3a751176e3" containerID="9f3a450307ac313de983e69c9b42a5c0299d6d1a6a49278dda0c039ecaee21b1" exitCode=1 Jan 05 23:12:32 crc kubenswrapper[4910]: I0105 23:12:32.268725 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"10864a95-2f74-47f9-b63b-4b3a751176e3","Type":"ContainerDied","Data":"9f3a450307ac313de983e69c9b42a5c0299d6d1a6a49278dda0c039ecaee21b1"} Jan 05 23:12:33 crc kubenswrapper[4910]: I0105 23:12:33.844175 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2-default" Jan 05 23:12:33 crc kubenswrapper[4910]: I0105 23:12:33.895578 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2-default"] Jan 05 23:12:33 crc kubenswrapper[4910]: I0105 23:12:33.900930 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2-default"] Jan 05 23:12:33 crc kubenswrapper[4910]: I0105 23:12:33.951084 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l44z4\" (UniqueName: \"kubernetes.io/projected/10864a95-2f74-47f9-b63b-4b3a751176e3-kube-api-access-l44z4\") pod \"10864a95-2f74-47f9-b63b-4b3a751176e3\" (UID: \"10864a95-2f74-47f9-b63b-4b3a751176e3\") " Jan 05 23:12:33 crc kubenswrapper[4910]: I0105 23:12:33.962018 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10864a95-2f74-47f9-b63b-4b3a751176e3-kube-api-access-l44z4" (OuterVolumeSpecName: "kube-api-access-l44z4") pod "10864a95-2f74-47f9-b63b-4b3a751176e3" (UID: "10864a95-2f74-47f9-b63b-4b3a751176e3"). InnerVolumeSpecName "kube-api-access-l44z4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.053904 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l44z4\" (UniqueName: \"kubernetes.io/projected/10864a95-2f74-47f9-b63b-4b3a751176e3-kube-api-access-l44z4\") on node \"crc\" DevicePath \"\"" Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.292390 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43ee2fd4b9aa742871b55e596c478892d03d9839ee29ce0e40b1586ede5e5790" Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.292518 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.396578 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1"] Jan 05 23:12:34 crc kubenswrapper[4910]: E0105 23:12:34.396941 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10864a95-2f74-47f9-b63b-4b3a751176e3" containerName="mariadb-client-2-default" Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.396959 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="10864a95-2f74-47f9-b63b-4b3a751176e3" containerName="mariadb-client-2-default" Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.397145 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="10864a95-2f74-47f9-b63b-4b3a751176e3" containerName="mariadb-client-2-default" Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.397747 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1" Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.400939 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-kn5kc" Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.424815 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.562734 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpf2g\" (UniqueName: \"kubernetes.io/projected/23c947ef-8021-416e-9e67-c28e3a15b65f-kube-api-access-jpf2g\") pod \"mariadb-client-1\" (UID: \"23c947ef-8021-416e-9e67-c28e3a15b65f\") " pod="openstack/mariadb-client-1" Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.664603 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpf2g\" (UniqueName: \"kubernetes.io/projected/23c947ef-8021-416e-9e67-c28e3a15b65f-kube-api-access-jpf2g\") pod \"mariadb-client-1\" (UID: \"23c947ef-8021-416e-9e67-c28e3a15b65f\") " pod="openstack/mariadb-client-1" Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.701677 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpf2g\" (UniqueName: \"kubernetes.io/projected/23c947ef-8021-416e-9e67-c28e3a15b65f-kube-api-access-jpf2g\") pod \"mariadb-client-1\" (UID: \"23c947ef-8021-416e-9e67-c28e3a15b65f\") " pod="openstack/mariadb-client-1" Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.721324 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Jan 05 23:12:34 crc kubenswrapper[4910]: I0105 23:12:34.744461 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10864a95-2f74-47f9-b63b-4b3a751176e3" path="/var/lib/kubelet/pods/10864a95-2f74-47f9-b63b-4b3a751176e3/volumes" Jan 05 23:12:35 crc kubenswrapper[4910]: I0105 23:12:35.116361 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Jan 05 23:12:35 crc kubenswrapper[4910]: W0105 23:12:35.125655 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod23c947ef_8021_416e_9e67_c28e3a15b65f.slice/crio-108fc92cae8546014f525337acefb6be4bf7d8ece0a51e931f18eecf070c698d WatchSource:0}: Error finding container 108fc92cae8546014f525337acefb6be4bf7d8ece0a51e931f18eecf070c698d: Status 404 returned error can't find the container with id 108fc92cae8546014f525337acefb6be4bf7d8ece0a51e931f18eecf070c698d Jan 05 23:12:35 crc kubenswrapper[4910]: I0105 23:12:35.305062 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"23c947ef-8021-416e-9e67-c28e3a15b65f","Type":"ContainerStarted","Data":"108fc92cae8546014f525337acefb6be4bf7d8ece0a51e931f18eecf070c698d"} Jan 05 23:12:36 crc kubenswrapper[4910]: I0105 23:12:36.340978 4910 generic.go:334] "Generic (PLEG): container finished" podID="23c947ef-8021-416e-9e67-c28e3a15b65f" containerID="262fd24133d16861ec119503b8b080e9eb4d699f023dd4eeb0bbce0a04fcabfb" exitCode=0 Jan 05 23:12:36 crc kubenswrapper[4910]: I0105 23:12:36.341089 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"23c947ef-8021-416e-9e67-c28e3a15b65f","Type":"ContainerDied","Data":"262fd24133d16861ec119503b8b080e9eb4d699f023dd4eeb0bbce0a04fcabfb"} Jan 05 23:12:37 crc kubenswrapper[4910]: I0105 
23:12:37.819567 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Jan 05 23:12:37 crc kubenswrapper[4910]: I0105 23:12:37.843370 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1_23c947ef-8021-416e-9e67-c28e3a15b65f/mariadb-client-1/0.log" Jan 05 23:12:37 crc kubenswrapper[4910]: I0105 23:12:37.866021 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1"] Jan 05 23:12:37 crc kubenswrapper[4910]: I0105 23:12:37.873363 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1"] Jan 05 23:12:37 crc kubenswrapper[4910]: I0105 23:12:37.928527 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jpf2g\" (UniqueName: \"kubernetes.io/projected/23c947ef-8021-416e-9e67-c28e3a15b65f-kube-api-access-jpf2g\") pod \"23c947ef-8021-416e-9e67-c28e3a15b65f\" (UID: \"23c947ef-8021-416e-9e67-c28e3a15b65f\") " Jan 05 23:12:37 crc kubenswrapper[4910]: I0105 23:12:37.937811 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23c947ef-8021-416e-9e67-c28e3a15b65f-kube-api-access-jpf2g" (OuterVolumeSpecName: "kube-api-access-jpf2g") pod "23c947ef-8021-416e-9e67-c28e3a15b65f" (UID: "23c947ef-8021-416e-9e67-c28e3a15b65f"). InnerVolumeSpecName "kube-api-access-jpf2g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.031326 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jpf2g\" (UniqueName: \"kubernetes.io/projected/23c947ef-8021-416e-9e67-c28e3a15b65f-kube-api-access-jpf2g\") on node \"crc\" DevicePath \"\"" Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.365244 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.365254 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="108fc92cae8546014f525337acefb6be4bf7d8ece0a51e931f18eecf070c698d" Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.413297 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-4-default"] Jan 05 23:12:38 crc kubenswrapper[4910]: E0105 23:12:38.413735 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23c947ef-8021-416e-9e67-c28e3a15b65f" containerName="mariadb-client-1" Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.413756 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="23c947ef-8021-416e-9e67-c28e3a15b65f" containerName="mariadb-client-1" Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.413962 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="23c947ef-8021-416e-9e67-c28e3a15b65f" containerName="mariadb-client-1" Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.414645 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-4-default" Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.417784 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-kn5kc" Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.428394 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.540717 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdt48\" (UniqueName: \"kubernetes.io/projected/6269febf-1468-461a-abff-049243ed5e45-kube-api-access-jdt48\") pod \"mariadb-client-4-default\" (UID: \"6269febf-1468-461a-abff-049243ed5e45\") " pod="openstack/mariadb-client-4-default" Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.642225 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdt48\" (UniqueName: \"kubernetes.io/projected/6269febf-1468-461a-abff-049243ed5e45-kube-api-access-jdt48\") pod \"mariadb-client-4-default\" (UID: \"6269febf-1468-461a-abff-049243ed5e45\") " pod="openstack/mariadb-client-4-default" Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.667467 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdt48\" (UniqueName: \"kubernetes.io/projected/6269febf-1468-461a-abff-049243ed5e45-kube-api-access-jdt48\") pod \"mariadb-client-4-default\" (UID: \"6269febf-1468-461a-abff-049243ed5e45\") " pod="openstack/mariadb-client-4-default" Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.735678 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Jan 05 23:12:38 crc kubenswrapper[4910]: I0105 23:12:38.740579 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23c947ef-8021-416e-9e67-c28e3a15b65f" path="/var/lib/kubelet/pods/23c947ef-8021-416e-9e67-c28e3a15b65f/volumes" Jan 05 23:12:39 crc kubenswrapper[4910]: I0105 23:12:39.173266 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Jan 05 23:12:39 crc kubenswrapper[4910]: W0105 23:12:39.178199 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6269febf_1468_461a_abff_049243ed5e45.slice/crio-97a13d3b1084a5454066d03488e9e1de027f35eedd50bbc98e3de420fd5f3b3c WatchSource:0}: Error finding container 97a13d3b1084a5454066d03488e9e1de027f35eedd50bbc98e3de420fd5f3b3c: Status 404 returned error can't find the container with id 97a13d3b1084a5454066d03488e9e1de027f35eedd50bbc98e3de420fd5f3b3c Jan 05 23:12:39 crc kubenswrapper[4910]: I0105 23:12:39.379085 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"6269febf-1468-461a-abff-049243ed5e45","Type":"ContainerStarted","Data":"a228175b818349bbb004a6d8b48a94b20f44c05354e283a0fc587e1d9a29c91c"} Jan 05 23:12:39 crc kubenswrapper[4910]: I0105 23:12:39.379182 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"6269febf-1468-461a-abff-049243ed5e45","Type":"ContainerStarted","Data":"97a13d3b1084a5454066d03488e9e1de027f35eedd50bbc98e3de420fd5f3b3c"} Jan 05 23:12:39 crc kubenswrapper[4910]: I0105 23:12:39.409197 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-4-default" podStartSLOduration=1.40915878 
podStartE2EDuration="1.40915878s" podCreationTimestamp="2026-01-05 23:12:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:12:39.394270722 +0000 UTC m=+4890.971768442" watchObservedRunningTime="2026-01-05 23:12:39.40915878 +0000 UTC m=+4890.986656470" Jan 05 23:12:39 crc kubenswrapper[4910]: I0105 23:12:39.459800 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-4-default_6269febf-1468-461a-abff-049243ed5e45/mariadb-client-4-default/0.log" Jan 05 23:12:40 crc kubenswrapper[4910]: I0105 23:12:40.389844 4910 generic.go:334] "Generic (PLEG): container finished" podID="6269febf-1468-461a-abff-049243ed5e45" containerID="a228175b818349bbb004a6d8b48a94b20f44c05354e283a0fc587e1d9a29c91c" exitCode=0 Jan 05 23:12:40 crc kubenswrapper[4910]: I0105 23:12:40.389908 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"6269febf-1468-461a-abff-049243ed5e45","Type":"ContainerDied","Data":"a228175b818349bbb004a6d8b48a94b20f44c05354e283a0fc587e1d9a29c91c"} Jan 05 23:12:41 crc kubenswrapper[4910]: I0105 23:12:41.876387 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Jan 05 23:12:41 crc kubenswrapper[4910]: I0105 23:12:41.914649 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-4-default"] Jan 05 23:12:41 crc kubenswrapper[4910]: I0105 23:12:41.920951 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-4-default"] Jan 05 23:12:42 crc kubenswrapper[4910]: I0105 23:12:42.007283 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdt48\" (UniqueName: \"kubernetes.io/projected/6269febf-1468-461a-abff-049243ed5e45-kube-api-access-jdt48\") pod \"6269febf-1468-461a-abff-049243ed5e45\" (UID: \"6269febf-1468-461a-abff-049243ed5e45\") " Jan 05 23:12:42 crc kubenswrapper[4910]: I0105 23:12:42.017214 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6269febf-1468-461a-abff-049243ed5e45-kube-api-access-jdt48" (OuterVolumeSpecName: "kube-api-access-jdt48") pod "6269febf-1468-461a-abff-049243ed5e45" (UID: "6269febf-1468-461a-abff-049243ed5e45"). InnerVolumeSpecName "kube-api-access-jdt48". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:12:42 crc kubenswrapper[4910]: I0105 23:12:42.109515 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdt48\" (UniqueName: \"kubernetes.io/projected/6269febf-1468-461a-abff-049243ed5e45-kube-api-access-jdt48\") on node \"crc\" DevicePath \"\"" Jan 05 23:12:42 crc kubenswrapper[4910]: I0105 23:12:42.416968 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97a13d3b1084a5454066d03488e9e1de027f35eedd50bbc98e3de420fd5f3b3c" Jan 05 23:12:42 crc kubenswrapper[4910]: I0105 23:12:42.417032 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-4-default" Jan 05 23:12:42 crc kubenswrapper[4910]: I0105 23:12:42.736500 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6269febf-1468-461a-abff-049243ed5e45" path="/var/lib/kubelet/pods/6269febf-1468-461a-abff-049243ed5e45/volumes" Jan 05 23:12:45 crc kubenswrapper[4910]: I0105 23:12:45.827821 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-5-default"] Jan 05 23:12:45 crc kubenswrapper[4910]: E0105 23:12:45.828621 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6269febf-1468-461a-abff-049243ed5e45" containerName="mariadb-client-4-default" Jan 05 23:12:45 crc kubenswrapper[4910]: I0105 23:12:45.828642 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6269febf-1468-461a-abff-049243ed5e45" containerName="mariadb-client-4-default" Jan 05 23:12:45 crc kubenswrapper[4910]: I0105 23:12:45.828942 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6269febf-1468-461a-abff-049243ed5e45" containerName="mariadb-client-4-default" Jan 05 23:12:45 crc kubenswrapper[4910]: I0105 23:12:45.829719 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Jan 05 23:12:45 crc kubenswrapper[4910]: I0105 23:12:45.837638 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-kn5kc" Jan 05 23:12:45 crc kubenswrapper[4910]: I0105 23:12:45.856918 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Jan 05 23:12:45 crc kubenswrapper[4910]: I0105 23:12:45.885918 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9dft\" (UniqueName: \"kubernetes.io/projected/7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb-kube-api-access-k9dft\") pod \"mariadb-client-5-default\" (UID: \"7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb\") " pod="openstack/mariadb-client-5-default" Jan 05 23:12:45 crc kubenswrapper[4910]: I0105 23:12:45.988051 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9dft\" (UniqueName: \"kubernetes.io/projected/7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb-kube-api-access-k9dft\") pod \"mariadb-client-5-default\" (UID: \"7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb\") " pod="openstack/mariadb-client-5-default" Jan 05 23:12:46 crc kubenswrapper[4910]: I0105 23:12:46.025676 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9dft\" (UniqueName: \"kubernetes.io/projected/7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb-kube-api-access-k9dft\") pod \"mariadb-client-5-default\" (UID: \"7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb\") " pod="openstack/mariadb-client-5-default" Jan 05 23:12:46 crc kubenswrapper[4910]: I0105 23:12:46.160720 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Jan 05 23:12:46 crc kubenswrapper[4910]: I0105 23:12:46.527530 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Jan 05 23:12:47 crc kubenswrapper[4910]: I0105 23:12:47.471432 4910 generic.go:334] "Generic (PLEG): container finished" podID="7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb" containerID="86c46c207b16005f4b2d841a6704c8ab9922d5b301a9d1d884c8411e1529ff6e" exitCode=0 Jan 05 23:12:47 crc kubenswrapper[4910]: I0105 23:12:47.471521 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb","Type":"ContainerDied","Data":"86c46c207b16005f4b2d841a6704c8ab9922d5b301a9d1d884c8411e1529ff6e"} Jan 05 23:12:47 crc kubenswrapper[4910]: I0105 23:12:47.471947 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb","Type":"ContainerStarted","Data":"4e4db8eaa7027b3043b494eacb06a0f105b039cb6b7ec7d32c4273ec5c08d83b"} Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.024500 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.052152 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-5-default_7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb/mariadb-client-5-default/0.log" Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.088764 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-5-default"] Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.100004 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-5-default"] Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.149329 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9dft\" (UniqueName: \"kubernetes.io/projected/7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb-kube-api-access-k9dft\") pod \"7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb\" (UID: \"7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb\") " Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.157461 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb-kube-api-access-k9dft" (OuterVolumeSpecName: "kube-api-access-k9dft") pod "7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb" (UID: "7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb"). InnerVolumeSpecName "kube-api-access-k9dft". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.267273 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9dft\" (UniqueName: \"kubernetes.io/projected/7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb-kube-api-access-k9dft\") on node \"crc\" DevicePath \"\"" Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.316597 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-6-default"] Jan 05 23:12:49 crc kubenswrapper[4910]: E0105 23:12:49.317199 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb" containerName="mariadb-client-5-default" Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.317225 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb" containerName="mariadb-client-5-default" Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.317480 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb" containerName="mariadb-client-5-default" Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.318311 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default" Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.336978 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.472984 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l76gp\" (UniqueName: \"kubernetes.io/projected/004d1582-60ae-4353-817b-ac418bef62c9-kube-api-access-l76gp\") pod \"mariadb-client-6-default\" (UID: \"004d1582-60ae-4353-817b-ac418bef62c9\") " pod="openstack/mariadb-client-6-default" Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.490680 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e4db8eaa7027b3043b494eacb06a0f105b039cb6b7ec7d32c4273ec5c08d83b" Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.490769 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.574065 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l76gp\" (UniqueName: \"kubernetes.io/projected/004d1582-60ae-4353-817b-ac418bef62c9-kube-api-access-l76gp\") pod \"mariadb-client-6-default\" (UID: \"004d1582-60ae-4353-817b-ac418bef62c9\") " pod="openstack/mariadb-client-6-default" Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.595391 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l76gp\" (UniqueName: \"kubernetes.io/projected/004d1582-60ae-4353-817b-ac418bef62c9-kube-api-access-l76gp\") pod \"mariadb-client-6-default\" (UID: \"004d1582-60ae-4353-817b-ac418bef62c9\") " pod="openstack/mariadb-client-6-default" Jan 05 23:12:49 crc kubenswrapper[4910]: I0105 23:12:49.653748 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Jan 05 23:12:50 crc kubenswrapper[4910]: I0105 23:12:50.290465 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Jan 05 23:12:50 crc kubenswrapper[4910]: W0105 23:12:50.303458 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod004d1582_60ae_4353_817b_ac418bef62c9.slice/crio-b3c47ad54533a2eab13dbf43ec1c66c2b78c883b0f6af1b7aa67282de9518242 WatchSource:0}: Error finding container b3c47ad54533a2eab13dbf43ec1c66c2b78c883b0f6af1b7aa67282de9518242: Status 404 returned error can't find the container with id b3c47ad54533a2eab13dbf43ec1c66c2b78c883b0f6af1b7aa67282de9518242 Jan 05 23:12:50 crc kubenswrapper[4910]: I0105 23:12:50.500283 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"004d1582-60ae-4353-817b-ac418bef62c9","Type":"ContainerStarted","Data":"9fcacaa850bdeda006c85e393c14525b24ec5f17067751f6c11e958e0c747ad5"} Jan 05 23:12:50 crc kubenswrapper[4910]: I0105 23:12:50.500338 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"004d1582-60ae-4353-817b-ac418bef62c9","Type":"ContainerStarted","Data":"b3c47ad54533a2eab13dbf43ec1c66c2b78c883b0f6af1b7aa67282de9518242"} Jan 05 23:12:50 crc kubenswrapper[4910]: I0105 23:12:50.524284 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-6-default" podStartSLOduration=1.524261998 podStartE2EDuration="1.524261998s" podCreationTimestamp="2026-01-05 23:12:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:12:50.517996413 +0000 UTC m=+4902.095494083" watchObservedRunningTime="2026-01-05 23:12:50.524261998 +0000 UTC m=+4902.101759668" Jan 05 23:12:50 crc kubenswrapper[4910]: I0105 23:12:50.735039 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb" path="/var/lib/kubelet/pods/7e1e544f-d516-40f7-8d03-c4c2d7f5a0fb/volumes" Jan 05 23:12:51 crc kubenswrapper[4910]: I0105 23:12:51.509089 4910 generic.go:334] "Generic (PLEG): container finished" podID="004d1582-60ae-4353-817b-ac418bef62c9" containerID="9fcacaa850bdeda006c85e393c14525b24ec5f17067751f6c11e958e0c747ad5" exitCode=1 Jan 05 23:12:51 crc kubenswrapper[4910]: I0105 23:12:51.509174 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"004d1582-60ae-4353-817b-ac418bef62c9","Type":"ContainerDied","Data":"9fcacaa850bdeda006c85e393c14525b24ec5f17067751f6c11e958e0c747ad5"} Jan 05 23:12:52 crc kubenswrapper[4910]: I0105 23:12:52.978528 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.021733 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-6-default"] Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.031235 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-6-default"] Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.140181 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l76gp\" (UniqueName: \"kubernetes.io/projected/004d1582-60ae-4353-817b-ac418bef62c9-kube-api-access-l76gp\") pod \"004d1582-60ae-4353-817b-ac418bef62c9\" (UID: \"004d1582-60ae-4353-817b-ac418bef62c9\") " Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.151358 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/004d1582-60ae-4353-817b-ac418bef62c9-kube-api-access-l76gp" (OuterVolumeSpecName: "kube-api-access-l76gp") pod "004d1582-60ae-4353-817b-ac418bef62c9" (UID: "004d1582-60ae-4353-817b-ac418bef62c9"). InnerVolumeSpecName "kube-api-access-l76gp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.228912 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-7-default"] Jan 05 23:12:53 crc kubenswrapper[4910]: E0105 23:12:53.229523 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="004d1582-60ae-4353-817b-ac418bef62c9" containerName="mariadb-client-6-default" Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.229555 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="004d1582-60ae-4353-817b-ac418bef62c9" containerName="mariadb-client-6-default" Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.229780 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="004d1582-60ae-4353-817b-ac418bef62c9" containerName="mariadb-client-6-default" Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.230631 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.243014 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l76gp\" (UniqueName: \"kubernetes.io/projected/004d1582-60ae-4353-817b-ac418bef62c9-kube-api-access-l76gp\") on node \"crc\" DevicePath \"\"" Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.244836 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.345294 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4knvb\" (UniqueName: \"kubernetes.io/projected/500ba607-b226-4afa-8a43-30d2a6b8147f-kube-api-access-4knvb\") pod \"mariadb-client-7-default\" (UID: \"500ba607-b226-4afa-8a43-30d2a6b8147f\") " pod="openstack/mariadb-client-7-default" Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.448144 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4knvb\" (UniqueName: \"kubernetes.io/projected/500ba607-b226-4afa-8a43-30d2a6b8147f-kube-api-access-4knvb\") pod \"mariadb-client-7-default\" (UID: \"500ba607-b226-4afa-8a43-30d2a6b8147f\") " pod="openstack/mariadb-client-7-default" Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.480578 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4knvb\" (UniqueName: \"kubernetes.io/projected/500ba607-b226-4afa-8a43-30d2a6b8147f-kube-api-access-4knvb\") pod \"mariadb-client-7-default\" (UID: \"500ba607-b226-4afa-8a43-30d2a6b8147f\") " pod="openstack/mariadb-client-7-default" Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.536505 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3c47ad54533a2eab13dbf43ec1c66c2b78c883b0f6af1b7aa67282de9518242" Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.536659 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default" Jan 05 23:12:53 crc kubenswrapper[4910]: I0105 23:12:53.553409 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Jan 05 23:12:54 crc kubenswrapper[4910]: I0105 23:12:54.158946 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Jan 05 23:12:54 crc kubenswrapper[4910]: I0105 23:12:54.552282 4910 generic.go:334] "Generic (PLEG): container finished" podID="500ba607-b226-4afa-8a43-30d2a6b8147f" containerID="587b71da4fbb3281df05fa00fce9c4aa0616966feda701069ff8a814c8c6c9dc" exitCode=0 Jan 05 23:12:54 crc kubenswrapper[4910]: I0105 23:12:54.552354 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"500ba607-b226-4afa-8a43-30d2a6b8147f","Type":"ContainerDied","Data":"587b71da4fbb3281df05fa00fce9c4aa0616966feda701069ff8a814c8c6c9dc"} Jan 05 23:12:54 crc kubenswrapper[4910]: I0105 23:12:54.552400 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"500ba607-b226-4afa-8a43-30d2a6b8147f","Type":"ContainerStarted","Data":"0f9075f5b4fae04bcfb7b87c74d162de046e8faa0c7f4b95127eb614d09b489c"} Jan 05 23:12:54 crc kubenswrapper[4910]: I0105 23:12:54.733203 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="004d1582-60ae-4353-817b-ac418bef62c9" path="/var/lib/kubelet/pods/004d1582-60ae-4353-817b-ac418bef62c9/volumes" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.053685 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.076540 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-7-default_500ba607-b226-4afa-8a43-30d2a6b8147f/mariadb-client-7-default/0.log" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.109026 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4knvb\" (UniqueName: \"kubernetes.io/projected/500ba607-b226-4afa-8a43-30d2a6b8147f-kube-api-access-4knvb\") pod \"500ba607-b226-4afa-8a43-30d2a6b8147f\" (UID: \"500ba607-b226-4afa-8a43-30d2a6b8147f\") " Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.115545 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/500ba607-b226-4afa-8a43-30d2a6b8147f-kube-api-access-4knvb" (OuterVolumeSpecName: "kube-api-access-4knvb") pod "500ba607-b226-4afa-8a43-30d2a6b8147f" (UID: "500ba607-b226-4afa-8a43-30d2a6b8147f"). InnerVolumeSpecName "kube-api-access-4knvb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.143718 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-7-default"] Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.152610 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-7-default"] Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.212197 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4knvb\" (UniqueName: \"kubernetes.io/projected/500ba607-b226-4afa-8a43-30d2a6b8147f-kube-api-access-4knvb\") on node \"crc\" DevicePath \"\"" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.301666 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2"] Jan 05 23:12:56 crc kubenswrapper[4910]: E0105 23:12:56.302049 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="500ba607-b226-4afa-8a43-30d2a6b8147f" containerName="mariadb-client-7-default" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.302074 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="500ba607-b226-4afa-8a43-30d2a6b8147f" containerName="mariadb-client-7-default" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.302316 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="500ba607-b226-4afa-8a43-30d2a6b8147f" containerName="mariadb-client-7-default" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.302991 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.314172 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79vqn\" (UniqueName: \"kubernetes.io/projected/c3e60145-66e6-43fe-9307-5f2bcac7bcb0-kube-api-access-79vqn\") pod \"mariadb-client-2\" (UID: \"c3e60145-66e6-43fe-9307-5f2bcac7bcb0\") " pod="openstack/mariadb-client-2" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.317725 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.415038 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79vqn\" (UniqueName: \"kubernetes.io/projected/c3e60145-66e6-43fe-9307-5f2bcac7bcb0-kube-api-access-79vqn\") pod \"mariadb-client-2\" (UID: \"c3e60145-66e6-43fe-9307-5f2bcac7bcb0\") " pod="openstack/mariadb-client-2" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.436366 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79vqn\" (UniqueName: \"kubernetes.io/projected/c3e60145-66e6-43fe-9307-5f2bcac7bcb0-kube-api-access-79vqn\") pod \"mariadb-client-2\" (UID: \"c3e60145-66e6-43fe-9307-5f2bcac7bcb0\") " pod="openstack/mariadb-client-2" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.575079 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f9075f5b4fae04bcfb7b87c74d162de046e8faa0c7f4b95127eb614d09b489c" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.575173 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.629002 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2" Jan 05 23:12:56 crc kubenswrapper[4910]: I0105 23:12:56.773579 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="500ba607-b226-4afa-8a43-30d2a6b8147f" path="/var/lib/kubelet/pods/500ba607-b226-4afa-8a43-30d2a6b8147f/volumes" Jan 05 23:12:57 crc kubenswrapper[4910]: I0105 23:12:57.212391 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Jan 05 23:12:57 crc kubenswrapper[4910]: W0105 23:12:57.214494 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc3e60145_66e6_43fe_9307_5f2bcac7bcb0.slice/crio-76e3b18f248b09a1e165f24a37025517f0a93313c433c456f72ff183a5e990e2 WatchSource:0}: Error finding container 76e3b18f248b09a1e165f24a37025517f0a93313c433c456f72ff183a5e990e2: Status 404 returned error can't find the container with id 76e3b18f248b09a1e165f24a37025517f0a93313c433c456f72ff183a5e990e2 Jan 05 23:12:57 crc kubenswrapper[4910]: I0105 23:12:57.590063 4910 generic.go:334] "Generic (PLEG): container finished" podID="c3e60145-66e6-43fe-9307-5f2bcac7bcb0" containerID="28e529ed0326863121da9918c08a8d2325dda120b46be4de1ecb71f4cb0e1bf3" exitCode=0 Jan 05 23:12:57 crc kubenswrapper[4910]: I0105 23:12:57.590171 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"c3e60145-66e6-43fe-9307-5f2bcac7bcb0","Type":"ContainerDied","Data":"28e529ed0326863121da9918c08a8d2325dda120b46be4de1ecb71f4cb0e1bf3"} Jan 05 23:12:57 crc kubenswrapper[4910]: I0105 23:12:57.590232 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"c3e60145-66e6-43fe-9307-5f2bcac7bcb0","Type":"ContainerStarted","Data":"76e3b18f248b09a1e165f24a37025517f0a93313c433c456f72ff183a5e990e2"} Jan 05 23:12:58 crc kubenswrapper[4910]: I0105 23:12:58.997161 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Jan 05 23:12:59 crc kubenswrapper[4910]: I0105 23:12:59.022160 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2_c3e60145-66e6-43fe-9307-5f2bcac7bcb0/mariadb-client-2/0.log" Jan 05 23:12:59 crc kubenswrapper[4910]: I0105 23:12:59.058947 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2"] Jan 05 23:12:59 crc kubenswrapper[4910]: I0105 23:12:59.065570 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2"] Jan 05 23:12:59 crc kubenswrapper[4910]: I0105 23:12:59.168365 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79vqn\" (UniqueName: \"kubernetes.io/projected/c3e60145-66e6-43fe-9307-5f2bcac7bcb0-kube-api-access-79vqn\") pod \"c3e60145-66e6-43fe-9307-5f2bcac7bcb0\" (UID: \"c3e60145-66e6-43fe-9307-5f2bcac7bcb0\") " Jan 05 23:12:59 crc kubenswrapper[4910]: I0105 23:12:59.178536 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3e60145-66e6-43fe-9307-5f2bcac7bcb0-kube-api-access-79vqn" (OuterVolumeSpecName: "kube-api-access-79vqn") pod "c3e60145-66e6-43fe-9307-5f2bcac7bcb0" (UID: "c3e60145-66e6-43fe-9307-5f2bcac7bcb0"). InnerVolumeSpecName "kube-api-access-79vqn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:12:59 crc kubenswrapper[4910]: I0105 23:12:59.271482 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79vqn\" (UniqueName: \"kubernetes.io/projected/c3e60145-66e6-43fe-9307-5f2bcac7bcb0-kube-api-access-79vqn\") on node \"crc\" DevicePath \"\"" Jan 05 23:12:59 crc kubenswrapper[4910]: I0105 23:12:59.619763 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76e3b18f248b09a1e165f24a37025517f0a93313c433c456f72ff183a5e990e2" Jan 05 23:12:59 crc kubenswrapper[4910]: I0105 23:12:59.619848 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Jan 05 23:13:00 crc kubenswrapper[4910]: I0105 23:13:00.738202 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3e60145-66e6-43fe-9307-5f2bcac7bcb0" path="/var/lib/kubelet/pods/c3e60145-66e6-43fe-9307-5f2bcac7bcb0/volumes" Jan 05 23:13:44 crc kubenswrapper[4910]: I0105 23:13:44.281308 4910 scope.go:117] "RemoveContainer" containerID="ea48581f0cc67ccbdd71a320a19b6619bd0ba996b299034de9c35b5b3f514f4e" Jan 05 23:14:40 crc kubenswrapper[4910]: I0105 23:14:40.952986 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:14:40 crc kubenswrapper[4910]: I0105 23:14:40.953838 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.160839 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69"] Jan 05 23:15:00 crc kubenswrapper[4910]: E0105 23:15:00.161998 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3e60145-66e6-43fe-9307-5f2bcac7bcb0" containerName="mariadb-client-2" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.162021 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3e60145-66e6-43fe-9307-5f2bcac7bcb0" containerName="mariadb-client-2" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.162240 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3e60145-66e6-43fe-9307-5f2bcac7bcb0" containerName="mariadb-client-2" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.162930 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.166868 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.167094 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.180612 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69"] Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.193855 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sv4wc\" (UniqueName: \"kubernetes.io/projected/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-kube-api-access-sv4wc\") pod \"collect-profiles-29460915-vzb69\" (UID: \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.194536 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-secret-volume\") pod \"collect-profiles-29460915-vzb69\" (UID: \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.194830 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-config-volume\") pod \"collect-profiles-29460915-vzb69\" (UID: \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.296989 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sv4wc\" (UniqueName: \"kubernetes.io/projected/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-kube-api-access-sv4wc\") pod \"collect-profiles-29460915-vzb69\" (UID: \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.297544 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-secret-volume\") pod \"collect-profiles-29460915-vzb69\" (UID: \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.297643 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-config-volume\") pod \"collect-profiles-29460915-vzb69\" (UID: \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.299332 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-config-volume\") pod 
\"collect-profiles-29460915-vzb69\" (UID: \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.307388 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-secret-volume\") pod \"collect-profiles-29460915-vzb69\" (UID: \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.321574 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sv4wc\" (UniqueName: \"kubernetes.io/projected/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-kube-api-access-sv4wc\") pod \"collect-profiles-29460915-vzb69\" (UID: \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" Jan 05 23:15:00 crc kubenswrapper[4910]: I0105 23:15:00.490306 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" Jan 05 23:15:01 crc kubenswrapper[4910]: I0105 23:15:01.041463 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69"] Jan 05 23:15:01 crc kubenswrapper[4910]: I0105 23:15:01.452952 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" event={"ID":"07d1fb0c-606e-46c1-93e9-fc1fafe920f0","Type":"ContainerStarted","Data":"50c35f1985c9e150d6c55ea39b7b5b4ca85c1b7980856b7f8d9906b136b5a08c"} Jan 05 23:15:01 crc kubenswrapper[4910]: I0105 23:15:01.453049 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" event={"ID":"07d1fb0c-606e-46c1-93e9-fc1fafe920f0","Type":"ContainerStarted","Data":"563e0afe8d4ab83b5f2a17a4c3290dad8511987c19b0a90b4337ce167d2caa1a"} Jan 05 23:15:01 crc kubenswrapper[4910]: I0105 23:15:01.486374 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" podStartSLOduration=1.486344981 podStartE2EDuration="1.486344981s" podCreationTimestamp="2026-01-05 23:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:15:01.478933077 +0000 UTC m=+5033.056430747" watchObservedRunningTime="2026-01-05 23:15:01.486344981 +0000 UTC m=+5033.063842661" Jan 05 23:15:02 crc kubenswrapper[4910]: I0105 23:15:02.465966 4910 generic.go:334] "Generic (PLEG): container finished" podID="07d1fb0c-606e-46c1-93e9-fc1fafe920f0" containerID="50c35f1985c9e150d6c55ea39b7b5b4ca85c1b7980856b7f8d9906b136b5a08c" exitCode=0 Jan 05 23:15:02 crc kubenswrapper[4910]: I0105 23:15:02.466055 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" event={"ID":"07d1fb0c-606e-46c1-93e9-fc1fafe920f0","Type":"ContainerDied","Data":"50c35f1985c9e150d6c55ea39b7b5b4ca85c1b7980856b7f8d9906b136b5a08c"} Jan 05 23:15:03 crc kubenswrapper[4910]: I0105 23:15:03.832569 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" Jan 05 23:15:03 crc kubenswrapper[4910]: I0105 23:15:03.876053 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sv4wc\" (UniqueName: \"kubernetes.io/projected/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-kube-api-access-sv4wc\") pod \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\" (UID: \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\") " Jan 05 23:15:03 crc kubenswrapper[4910]: I0105 23:15:03.876228 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-config-volume\") pod \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\" (UID: \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\") " Jan 05 23:15:03 crc kubenswrapper[4910]: I0105 23:15:03.876277 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-secret-volume\") pod \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\" (UID: \"07d1fb0c-606e-46c1-93e9-fc1fafe920f0\") " Jan 05 23:15:03 crc kubenswrapper[4910]: I0105 23:15:03.877221 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-config-volume" (OuterVolumeSpecName: "config-volume") pod "07d1fb0c-606e-46c1-93e9-fc1fafe920f0" (UID: "07d1fb0c-606e-46c1-93e9-fc1fafe920f0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:15:03 crc kubenswrapper[4910]: I0105 23:15:03.896898 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "07d1fb0c-606e-46c1-93e9-fc1fafe920f0" (UID: "07d1fb0c-606e-46c1-93e9-fc1fafe920f0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:15:03 crc kubenswrapper[4910]: I0105 23:15:03.897796 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-kube-api-access-sv4wc" (OuterVolumeSpecName: "kube-api-access-sv4wc") pod "07d1fb0c-606e-46c1-93e9-fc1fafe920f0" (UID: "07d1fb0c-606e-46c1-93e9-fc1fafe920f0"). InnerVolumeSpecName "kube-api-access-sv4wc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:15:03 crc kubenswrapper[4910]: I0105 23:15:03.978448 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sv4wc\" (UniqueName: \"kubernetes.io/projected/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-kube-api-access-sv4wc\") on node \"crc\" DevicePath \"\"" Jan 05 23:15:03 crc kubenswrapper[4910]: I0105 23:15:03.978504 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-config-volume\") on node \"crc\" DevicePath \"\"" Jan 05 23:15:03 crc kubenswrapper[4910]: I0105 23:15:03.978518 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/07d1fb0c-606e-46c1-93e9-fc1fafe920f0-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 05 23:15:04 crc kubenswrapper[4910]: I0105 23:15:04.494077 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" event={"ID":"07d1fb0c-606e-46c1-93e9-fc1fafe920f0","Type":"ContainerDied","Data":"563e0afe8d4ab83b5f2a17a4c3290dad8511987c19b0a90b4337ce167d2caa1a"} Jan 05 23:15:04 crc kubenswrapper[4910]: I0105 23:15:04.494669 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="563e0afe8d4ab83b5f2a17a4c3290dad8511987c19b0a90b4337ce167d2caa1a" Jan 05 23:15:04 crc kubenswrapper[4910]: I0105 23:15:04.494204 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460915-vzb69" Jan 05 23:15:04 crc kubenswrapper[4910]: I0105 23:15:04.581924 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"] Jan 05 23:15:04 crc kubenswrapper[4910]: I0105 23:15:04.588210 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460870-jhg2c"] Jan 05 23:15:04 crc kubenswrapper[4910]: I0105 23:15:04.735276 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46d6f885-2342-45f3-b84e-6ebd88cf4b2d" path="/var/lib/kubelet/pods/46d6f885-2342-45f3-b84e-6ebd88cf4b2d/volumes" Jan 05 23:15:10 crc kubenswrapper[4910]: I0105 23:15:10.952977 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:15:10 crc kubenswrapper[4910]: I0105 23:15:10.953903 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:15:40 crc kubenswrapper[4910]: I0105 23:15:40.952616 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:15:40 crc kubenswrapper[4910]: I0105 23:15:40.953555 4910 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:15:40 crc kubenswrapper[4910]: I0105 23:15:40.953626 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 23:15:40 crc kubenswrapper[4910]: I0105 23:15:40.954388 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1b4947f16488761156b000cbce3970d8b169fa16ff0cca2579226d719a03df0b"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 23:15:40 crc kubenswrapper[4910]: I0105 23:15:40.954456 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://1b4947f16488761156b000cbce3970d8b169fa16ff0cca2579226d719a03df0b" gracePeriod=600 Jan 05 23:15:41 crc kubenswrapper[4910]: I0105 23:15:41.935182 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="1b4947f16488761156b000cbce3970d8b169fa16ff0cca2579226d719a03df0b" exitCode=0 Jan 05 23:15:41 crc kubenswrapper[4910]: I0105 23:15:41.935260 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"1b4947f16488761156b000cbce3970d8b169fa16ff0cca2579226d719a03df0b"} Jan 05 23:15:41 crc kubenswrapper[4910]: I0105 23:15:41.935928 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e"} Jan 05 23:15:41 crc kubenswrapper[4910]: I0105 23:15:41.935963 4910 scope.go:117] "RemoveContainer" containerID="a91b2ed1dd18abdbeee2358c6f332ea718c2107406ac7cbb009f035571ce325d" Jan 05 23:15:44 crc kubenswrapper[4910]: I0105 23:15:44.376791 4910 scope.go:117] "RemoveContainer" containerID="18d9a969858bfdb362a7efd7c806d89cfdc3200eff1915b88c9c54709ecf940c" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.151692 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-znfv7"] Jan 05 23:16:11 crc kubenswrapper[4910]: E0105 23:16:11.152939 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07d1fb0c-606e-46c1-93e9-fc1fafe920f0" containerName="collect-profiles" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.152960 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="07d1fb0c-606e-46c1-93e9-fc1fafe920f0" containerName="collect-profiles" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.153242 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="07d1fb0c-606e-46c1-93e9-fc1fafe920f0" containerName="collect-profiles" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.155280 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.168713 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-znfv7"] Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.196309 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e6020f7-25ec-4161-b102-444078737534-catalog-content\") pod \"community-operators-znfv7\" (UID: \"6e6020f7-25ec-4161-b102-444078737534\") " pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.196374 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrjwr\" (UniqueName: \"kubernetes.io/projected/6e6020f7-25ec-4161-b102-444078737534-kube-api-access-zrjwr\") pod \"community-operators-znfv7\" (UID: \"6e6020f7-25ec-4161-b102-444078737534\") " pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.196416 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e6020f7-25ec-4161-b102-444078737534-utilities\") pod \"community-operators-znfv7\" (UID: \"6e6020f7-25ec-4161-b102-444078737534\") " pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.298051 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e6020f7-25ec-4161-b102-444078737534-catalog-content\") pod \"community-operators-znfv7\" (UID: \"6e6020f7-25ec-4161-b102-444078737534\") " pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.298140 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrjwr\" (UniqueName: \"kubernetes.io/projected/6e6020f7-25ec-4161-b102-444078737534-kube-api-access-zrjwr\") pod \"community-operators-znfv7\" (UID: \"6e6020f7-25ec-4161-b102-444078737534\") " pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.298185 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e6020f7-25ec-4161-b102-444078737534-utilities\") pod \"community-operators-znfv7\" (UID: \"6e6020f7-25ec-4161-b102-444078737534\") " pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.298824 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e6020f7-25ec-4161-b102-444078737534-utilities\") pod \"community-operators-znfv7\" (UID: \"6e6020f7-25ec-4161-b102-444078737534\") " pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.299453 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e6020f7-25ec-4161-b102-444078737534-catalog-content\") pod \"community-operators-znfv7\" (UID: \"6e6020f7-25ec-4161-b102-444078737534\") " pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.319597 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zrjwr\" (UniqueName: \"kubernetes.io/projected/6e6020f7-25ec-4161-b102-444078737534-kube-api-access-zrjwr\") pod \"community-operators-znfv7\" (UID: \"6e6020f7-25ec-4161-b102-444078737534\") " pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.514525 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:11 crc kubenswrapper[4910]: I0105 23:16:11.927929 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-znfv7"] Jan 05 23:16:12 crc kubenswrapper[4910]: I0105 23:16:12.287053 4910 generic.go:334] "Generic (PLEG): container finished" podID="6e6020f7-25ec-4161-b102-444078737534" containerID="b55ecb8549509bfa7adf21dca93fa47e82b2e6e043abfaa9125e0c71bde8a150" exitCode=0 Jan 05 23:16:12 crc kubenswrapper[4910]: I0105 23:16:12.287107 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znfv7" event={"ID":"6e6020f7-25ec-4161-b102-444078737534","Type":"ContainerDied","Data":"b55ecb8549509bfa7adf21dca93fa47e82b2e6e043abfaa9125e0c71bde8a150"} Jan 05 23:16:12 crc kubenswrapper[4910]: I0105 23:16:12.287162 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znfv7" event={"ID":"6e6020f7-25ec-4161-b102-444078737534","Type":"ContainerStarted","Data":"228555846f36caf3708a563a963d3b13f6e3f69181b58b14ee7185b86d52e8ad"} Jan 05 23:16:13 crc kubenswrapper[4910]: I0105 23:16:13.303804 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znfv7" event={"ID":"6e6020f7-25ec-4161-b102-444078737534","Type":"ContainerStarted","Data":"8f1628a55dae704d0d54c682be4cc2d87f5c29df01ddb6e8d6c74e1d698d4f1f"} Jan 05 23:16:14 crc kubenswrapper[4910]: I0105 23:16:14.315788 4910 generic.go:334] "Generic (PLEG): container finished" podID="6e6020f7-25ec-4161-b102-444078737534" containerID="8f1628a55dae704d0d54c682be4cc2d87f5c29df01ddb6e8d6c74e1d698d4f1f" exitCode=0 Jan 05 23:16:14 crc kubenswrapper[4910]: I0105 23:16:14.315943 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znfv7" event={"ID":"6e6020f7-25ec-4161-b102-444078737534","Type":"ContainerDied","Data":"8f1628a55dae704d0d54c682be4cc2d87f5c29df01ddb6e8d6c74e1d698d4f1f"} Jan 05 23:16:15 crc kubenswrapper[4910]: I0105 23:16:15.334681 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znfv7" event={"ID":"6e6020f7-25ec-4161-b102-444078737534","Type":"ContainerStarted","Data":"88fd9412da51fd960c6f2f730a52662f186b492859ed8cc537271ec20eabedd4"} Jan 05 23:16:15 crc kubenswrapper[4910]: I0105 23:16:15.374168 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-znfv7" podStartSLOduration=1.634459128 podStartE2EDuration="4.374107074s" podCreationTimestamp="2026-01-05 23:16:11 +0000 UTC" firstStartedPulling="2026-01-05 23:16:12.290098199 +0000 UTC m=+5103.867595899" lastFinishedPulling="2026-01-05 23:16:15.029746155 +0000 UTC m=+5106.607243845" observedRunningTime="2026-01-05 23:16:15.362335153 +0000 UTC m=+5106.939832843" watchObservedRunningTime="2026-01-05 23:16:15.374107074 +0000 UTC m=+5106.951604754" Jan 05 23:16:21 crc kubenswrapper[4910]: I0105 23:16:21.515012 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:21 crc kubenswrapper[4910]: I0105 23:16:21.515746 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:21 crc kubenswrapper[4910]: I0105 23:16:21.595661 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:22 crc kubenswrapper[4910]: I0105 23:16:22.464548 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:22 crc kubenswrapper[4910]: I0105 23:16:22.529801 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-znfv7"] Jan 05 23:16:24 crc kubenswrapper[4910]: I0105 23:16:24.427320 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-znfv7" podUID="6e6020f7-25ec-4161-b102-444078737534" containerName="registry-server" containerID="cri-o://88fd9412da51fd960c6f2f730a52662f186b492859ed8cc537271ec20eabedd4" gracePeriod=2 Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.409816 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.445246 4910 generic.go:334] "Generic (PLEG): container finished" podID="6e6020f7-25ec-4161-b102-444078737534" containerID="88fd9412da51fd960c6f2f730a52662f186b492859ed8cc537271ec20eabedd4" exitCode=0 Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.445312 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znfv7" event={"ID":"6e6020f7-25ec-4161-b102-444078737534","Type":"ContainerDied","Data":"88fd9412da51fd960c6f2f730a52662f186b492859ed8cc537271ec20eabedd4"} Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.445354 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znfv7" event={"ID":"6e6020f7-25ec-4161-b102-444078737534","Type":"ContainerDied","Data":"228555846f36caf3708a563a963d3b13f6e3f69181b58b14ee7185b86d52e8ad"} Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.445385 4910 scope.go:117] "RemoveContainer" containerID="88fd9412da51fd960c6f2f730a52662f186b492859ed8cc537271ec20eabedd4" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.445577 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-znfv7" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.474319 4910 scope.go:117] "RemoveContainer" containerID="8f1628a55dae704d0d54c682be4cc2d87f5c29df01ddb6e8d6c74e1d698d4f1f" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.504846 4910 scope.go:117] "RemoveContainer" containerID="b55ecb8549509bfa7adf21dca93fa47e82b2e6e043abfaa9125e0c71bde8a150" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.540231 4910 scope.go:117] "RemoveContainer" containerID="88fd9412da51fd960c6f2f730a52662f186b492859ed8cc537271ec20eabedd4" Jan 05 23:16:25 crc kubenswrapper[4910]: E0105 23:16:25.540903 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88fd9412da51fd960c6f2f730a52662f186b492859ed8cc537271ec20eabedd4\": container with ID starting with 88fd9412da51fd960c6f2f730a52662f186b492859ed8cc537271ec20eabedd4 not found: ID does not exist" containerID="88fd9412da51fd960c6f2f730a52662f186b492859ed8cc537271ec20eabedd4" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.540948 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88fd9412da51fd960c6f2f730a52662f186b492859ed8cc537271ec20eabedd4"} err="failed to get container status \"88fd9412da51fd960c6f2f730a52662f186b492859ed8cc537271ec20eabedd4\": rpc error: code = NotFound desc = could not find container \"88fd9412da51fd960c6f2f730a52662f186b492859ed8cc537271ec20eabedd4\": container with ID starting with 88fd9412da51fd960c6f2f730a52662f186b492859ed8cc537271ec20eabedd4 not found: ID does not exist" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.540977 4910 scope.go:117] "RemoveContainer" containerID="8f1628a55dae704d0d54c682be4cc2d87f5c29df01ddb6e8d6c74e1d698d4f1f" Jan 05 23:16:25 crc kubenswrapper[4910]: E0105 23:16:25.541448 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f1628a55dae704d0d54c682be4cc2d87f5c29df01ddb6e8d6c74e1d698d4f1f\": container with ID starting with 8f1628a55dae704d0d54c682be4cc2d87f5c29df01ddb6e8d6c74e1d698d4f1f not found: ID does not exist" containerID="8f1628a55dae704d0d54c682be4cc2d87f5c29df01ddb6e8d6c74e1d698d4f1f" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.541481 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f1628a55dae704d0d54c682be4cc2d87f5c29df01ddb6e8d6c74e1d698d4f1f"} err="failed to get container status \"8f1628a55dae704d0d54c682be4cc2d87f5c29df01ddb6e8d6c74e1d698d4f1f\": rpc error: code = NotFound desc = could not find container \"8f1628a55dae704d0d54c682be4cc2d87f5c29df01ddb6e8d6c74e1d698d4f1f\": container with ID starting with 8f1628a55dae704d0d54c682be4cc2d87f5c29df01ddb6e8d6c74e1d698d4f1f not found: ID does not exist" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.541502 4910 scope.go:117] "RemoveContainer" containerID="b55ecb8549509bfa7adf21dca93fa47e82b2e6e043abfaa9125e0c71bde8a150" Jan 05 23:16:25 crc kubenswrapper[4910]: E0105 23:16:25.541895 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b55ecb8549509bfa7adf21dca93fa47e82b2e6e043abfaa9125e0c71bde8a150\": container with ID starting with b55ecb8549509bfa7adf21dca93fa47e82b2e6e043abfaa9125e0c71bde8a150 not found: ID does not exist" containerID="b55ecb8549509bfa7adf21dca93fa47e82b2e6e043abfaa9125e0c71bde8a150" 
Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.541925 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b55ecb8549509bfa7adf21dca93fa47e82b2e6e043abfaa9125e0c71bde8a150"} err="failed to get container status \"b55ecb8549509bfa7adf21dca93fa47e82b2e6e043abfaa9125e0c71bde8a150\": rpc error: code = NotFound desc = could not find container \"b55ecb8549509bfa7adf21dca93fa47e82b2e6e043abfaa9125e0c71bde8a150\": container with ID starting with b55ecb8549509bfa7adf21dca93fa47e82b2e6e043abfaa9125e0c71bde8a150 not found: ID does not exist" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.582000 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e6020f7-25ec-4161-b102-444078737534-utilities\") pod \"6e6020f7-25ec-4161-b102-444078737534\" (UID: \"6e6020f7-25ec-4161-b102-444078737534\") " Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.582086 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zrjwr\" (UniqueName: \"kubernetes.io/projected/6e6020f7-25ec-4161-b102-444078737534-kube-api-access-zrjwr\") pod \"6e6020f7-25ec-4161-b102-444078737534\" (UID: \"6e6020f7-25ec-4161-b102-444078737534\") " Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.582223 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e6020f7-25ec-4161-b102-444078737534-catalog-content\") pod \"6e6020f7-25ec-4161-b102-444078737534\" (UID: \"6e6020f7-25ec-4161-b102-444078737534\") " Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.586813 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e6020f7-25ec-4161-b102-444078737534-utilities" (OuterVolumeSpecName: "utilities") pod "6e6020f7-25ec-4161-b102-444078737534" (UID: "6e6020f7-25ec-4161-b102-444078737534"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.605400 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e6020f7-25ec-4161-b102-444078737534-kube-api-access-zrjwr" (OuterVolumeSpecName: "kube-api-access-zrjwr") pod "6e6020f7-25ec-4161-b102-444078737534" (UID: "6e6020f7-25ec-4161-b102-444078737534"). InnerVolumeSpecName "kube-api-access-zrjwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.680688 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e6020f7-25ec-4161-b102-444078737534-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6e6020f7-25ec-4161-b102-444078737534" (UID: "6e6020f7-25ec-4161-b102-444078737534"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.683898 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e6020f7-25ec-4161-b102-444078737534-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.683929 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e6020f7-25ec-4161-b102-444078737534-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.683943 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zrjwr\" (UniqueName: \"kubernetes.io/projected/6e6020f7-25ec-4161-b102-444078737534-kube-api-access-zrjwr\") on node \"crc\" DevicePath \"\"" Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.794837 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-znfv7"] Jan 05 23:16:25 crc kubenswrapper[4910]: I0105 23:16:25.801924 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-znfv7"] Jan 05 23:16:26 crc kubenswrapper[4910]: I0105 23:16:26.739978 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e6020f7-25ec-4161-b102-444078737534" path="/var/lib/kubelet/pods/6e6020f7-25ec-4161-b102-444078737534/volumes" Jan 05 23:16:44 crc kubenswrapper[4910]: I0105 23:16:44.479429 4910 scope.go:117] "RemoveContainer" containerID="c54151f6229253593ca70479ea8e470a4d6fd1cbbfd73c9e7950940175c8a418" Jan 05 23:17:04 crc kubenswrapper[4910]: I0105 23:17:04.900487 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-copy-data"] Jan 05 23:17:04 crc kubenswrapper[4910]: E0105 23:17:04.901863 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e6020f7-25ec-4161-b102-444078737534" containerName="registry-server" Jan 05 23:17:04 crc kubenswrapper[4910]: I0105 23:17:04.901892 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e6020f7-25ec-4161-b102-444078737534" containerName="registry-server" Jan 05 23:17:04 crc kubenswrapper[4910]: E0105 23:17:04.901921 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e6020f7-25ec-4161-b102-444078737534" containerName="extract-utilities" Jan 05 23:17:04 crc kubenswrapper[4910]: I0105 23:17:04.901935 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e6020f7-25ec-4161-b102-444078737534" containerName="extract-utilities" Jan 05 23:17:04 crc kubenswrapper[4910]: E0105 23:17:04.901963 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e6020f7-25ec-4161-b102-444078737534" containerName="extract-content" Jan 05 23:17:04 crc kubenswrapper[4910]: I0105 23:17:04.901978 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e6020f7-25ec-4161-b102-444078737534" containerName="extract-content" Jan 05 23:17:04 crc kubenswrapper[4910]: I0105 23:17:04.902330 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e6020f7-25ec-4161-b102-444078737534" containerName="registry-server" Jan 05 23:17:04 crc kubenswrapper[4910]: I0105 23:17:04.903317 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-copy-data" Jan 05 23:17:04 crc kubenswrapper[4910]: I0105 23:17:04.906885 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-kn5kc" Jan 05 23:17:04 crc kubenswrapper[4910]: I0105 23:17:04.911608 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Jan 05 23:17:04 crc kubenswrapper[4910]: I0105 23:17:04.958143 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtjpc\" (UniqueName: \"kubernetes.io/projected/bfa6bb42-5c0e-4ef6-9378-d52fa1fbfb8c-kube-api-access-wtjpc\") pod \"mariadb-copy-data\" (UID: \"bfa6bb42-5c0e-4ef6-9378-d52fa1fbfb8c\") " pod="openstack/mariadb-copy-data" Jan 05 23:17:04 crc kubenswrapper[4910]: I0105 23:17:04.958237 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8021a5d9-2dea-4cef-8612-cc1f5deb99c7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8021a5d9-2dea-4cef-8612-cc1f5deb99c7\") pod \"mariadb-copy-data\" (UID: \"bfa6bb42-5c0e-4ef6-9378-d52fa1fbfb8c\") " pod="openstack/mariadb-copy-data" Jan 05 23:17:05 crc kubenswrapper[4910]: I0105 23:17:05.059642 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtjpc\" (UniqueName: \"kubernetes.io/projected/bfa6bb42-5c0e-4ef6-9378-d52fa1fbfb8c-kube-api-access-wtjpc\") pod \"mariadb-copy-data\" (UID: \"bfa6bb42-5c0e-4ef6-9378-d52fa1fbfb8c\") " pod="openstack/mariadb-copy-data" Jan 05 23:17:05 crc kubenswrapper[4910]: I0105 23:17:05.059800 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8021a5d9-2dea-4cef-8612-cc1f5deb99c7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8021a5d9-2dea-4cef-8612-cc1f5deb99c7\") pod \"mariadb-copy-data\" (UID: \"bfa6bb42-5c0e-4ef6-9378-d52fa1fbfb8c\") " pod="openstack/mariadb-copy-data" Jan 05 23:17:05 crc kubenswrapper[4910]: I0105 23:17:05.065863 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 05 23:17:05 crc kubenswrapper[4910]: I0105 23:17:05.065906 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8021a5d9-2dea-4cef-8612-cc1f5deb99c7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8021a5d9-2dea-4cef-8612-cc1f5deb99c7\") pod \"mariadb-copy-data\" (UID: \"bfa6bb42-5c0e-4ef6-9378-d52fa1fbfb8c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/fd2544ea5235a1d86d8736b609e4b04dc7fe9d691a1b36a4e65a55eeb302c835/globalmount\"" pod="openstack/mariadb-copy-data" Jan 05 23:17:05 crc kubenswrapper[4910]: I0105 23:17:05.089750 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtjpc\" (UniqueName: \"kubernetes.io/projected/bfa6bb42-5c0e-4ef6-9378-d52fa1fbfb8c-kube-api-access-wtjpc\") pod \"mariadb-copy-data\" (UID: \"bfa6bb42-5c0e-4ef6-9378-d52fa1fbfb8c\") " pod="openstack/mariadb-copy-data" Jan 05 23:17:05 crc kubenswrapper[4910]: I0105 23:17:05.110602 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8021a5d9-2dea-4cef-8612-cc1f5deb99c7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8021a5d9-2dea-4cef-8612-cc1f5deb99c7\") pod \"mariadb-copy-data\" (UID: \"bfa6bb42-5c0e-4ef6-9378-d52fa1fbfb8c\") " pod="openstack/mariadb-copy-data" Jan 05 23:17:05 crc kubenswrapper[4910]: I0105 23:17:05.244057 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Jan 05 23:17:05 crc kubenswrapper[4910]: I0105 23:17:05.654356 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Jan 05 23:17:05 crc kubenswrapper[4910]: I0105 23:17:05.862787 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"bfa6bb42-5c0e-4ef6-9378-d52fa1fbfb8c","Type":"ContainerStarted","Data":"8a9ba839d0845243b364328cf0cecfe83db8a4f017dc271c7f85dfa5f991a894"} Jan 05 23:17:05 crc kubenswrapper[4910]: I0105 23:17:05.864056 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"bfa6bb42-5c0e-4ef6-9378-d52fa1fbfb8c","Type":"ContainerStarted","Data":"90df20e2a70ed8b893c59f8e424d82ca6eb318cb59e5d78f9c0c319567458e63"} Jan 05 23:17:05 crc kubenswrapper[4910]: I0105 23:17:05.885644 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-copy-data" podStartSLOduration=2.8856145140000002 podStartE2EDuration="2.885614514s" podCreationTimestamp="2026-01-05 23:17:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:17:05.881758089 +0000 UTC m=+5157.459255799" watchObservedRunningTime="2026-01-05 23:17:05.885614514 +0000 UTC m=+5157.463112224" Jan 05 23:17:09 crc kubenswrapper[4910]: I0105 23:17:09.213291 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Jan 05 23:17:09 crc kubenswrapper[4910]: I0105 23:17:09.215103 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 05 23:17:09 crc kubenswrapper[4910]: I0105 23:17:09.227388 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 05 23:17:09 crc kubenswrapper[4910]: I0105 23:17:09.242547 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsft2\" (UniqueName: \"kubernetes.io/projected/37afb6f3-c608-4856-a529-42bb08d3b2a0-kube-api-access-vsft2\") pod \"mariadb-client\" (UID: \"37afb6f3-c608-4856-a529-42bb08d3b2a0\") " pod="openstack/mariadb-client" Jan 05 23:17:09 crc kubenswrapper[4910]: I0105 23:17:09.344916 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsft2\" (UniqueName: \"kubernetes.io/projected/37afb6f3-c608-4856-a529-42bb08d3b2a0-kube-api-access-vsft2\") pod \"mariadb-client\" (UID: \"37afb6f3-c608-4856-a529-42bb08d3b2a0\") " pod="openstack/mariadb-client" Jan 05 23:17:09 crc kubenswrapper[4910]: I0105 23:17:09.364820 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsft2\" (UniqueName: \"kubernetes.io/projected/37afb6f3-c608-4856-a529-42bb08d3b2a0-kube-api-access-vsft2\") pod \"mariadb-client\" (UID: \"37afb6f3-c608-4856-a529-42bb08d3b2a0\") " pod="openstack/mariadb-client" Jan 05 23:17:09 crc kubenswrapper[4910]: I0105 23:17:09.546517 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 05 23:17:10 crc kubenswrapper[4910]: I0105 23:17:10.030402 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 05 23:17:10 crc kubenswrapper[4910]: I0105 23:17:10.924710 4910 generic.go:334] "Generic (PLEG): container finished" podID="37afb6f3-c608-4856-a529-42bb08d3b2a0" containerID="aca55919b4bbf7bf8565e479b1f9fb4b5ef492b39ca569a284d519a082c68634" exitCode=0 Jan 05 23:17:10 crc kubenswrapper[4910]: I0105 23:17:10.924761 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"37afb6f3-c608-4856-a529-42bb08d3b2a0","Type":"ContainerDied","Data":"aca55919b4bbf7bf8565e479b1f9fb4b5ef492b39ca569a284d519a082c68634"} Jan 05 23:17:10 crc kubenswrapper[4910]: I0105 23:17:10.925149 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"37afb6f3-c608-4856-a529-42bb08d3b2a0","Type":"ContainerStarted","Data":"1cc767e6cc4afb08ec6ae4feeb569cdd607edf4afd8afb9bb9b58667cb70c4ee"} Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.326801 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.357952 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_37afb6f3-c608-4856-a529-42bb08d3b2a0/mariadb-client/0.log" Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.392535 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.397690 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.469355 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vsft2\" (UniqueName: \"kubernetes.io/projected/37afb6f3-c608-4856-a529-42bb08d3b2a0-kube-api-access-vsft2\") pod \"37afb6f3-c608-4856-a529-42bb08d3b2a0\" (UID: \"37afb6f3-c608-4856-a529-42bb08d3b2a0\") " Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.479569 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37afb6f3-c608-4856-a529-42bb08d3b2a0-kube-api-access-vsft2" (OuterVolumeSpecName: "kube-api-access-vsft2") pod "37afb6f3-c608-4856-a529-42bb08d3b2a0" (UID: "37afb6f3-c608-4856-a529-42bb08d3b2a0"). InnerVolumeSpecName "kube-api-access-vsft2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.571968 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vsft2\" (UniqueName: \"kubernetes.io/projected/37afb6f3-c608-4856-a529-42bb08d3b2a0-kube-api-access-vsft2\") on node \"crc\" DevicePath \"\"" Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.668703 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Jan 05 23:17:12 crc kubenswrapper[4910]: E0105 23:17:12.669322 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37afb6f3-c608-4856-a529-42bb08d3b2a0" containerName="mariadb-client" Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.669344 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="37afb6f3-c608-4856-a529-42bb08d3b2a0" containerName="mariadb-client" Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.669549 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="37afb6f3-c608-4856-a529-42bb08d3b2a0" containerName="mariadb-client" Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.670322 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.682803 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.737972 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37afb6f3-c608-4856-a529-42bb08d3b2a0" path="/var/lib/kubelet/pods/37afb6f3-c608-4856-a529-42bb08d3b2a0/volumes" Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.774843 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn5f4\" (UniqueName: \"kubernetes.io/projected/c1106087-c536-4c6b-b28d-b8747492fa66-kube-api-access-sn5f4\") pod \"mariadb-client\" (UID: \"c1106087-c536-4c6b-b28d-b8747492fa66\") " pod="openstack/mariadb-client" Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.877030 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn5f4\" (UniqueName: \"kubernetes.io/projected/c1106087-c536-4c6b-b28d-b8747492fa66-kube-api-access-sn5f4\") pod \"mariadb-client\" (UID: \"c1106087-c536-4c6b-b28d-b8747492fa66\") " pod="openstack/mariadb-client" Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.902499 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn5f4\" (UniqueName: \"kubernetes.io/projected/c1106087-c536-4c6b-b28d-b8747492fa66-kube-api-access-sn5f4\") pod \"mariadb-client\" (UID: \"c1106087-c536-4c6b-b28d-b8747492fa66\") " pod="openstack/mariadb-client" Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.948828 4910 scope.go:117] "RemoveContainer" containerID="aca55919b4bbf7bf8565e479b1f9fb4b5ef492b39ca569a284d519a082c68634" Jan 05 23:17:12 crc kubenswrapper[4910]: I0105 23:17:12.948911 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 05 23:17:13 crc kubenswrapper[4910]: I0105 23:17:13.035168 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 05 23:17:13 crc kubenswrapper[4910]: I0105 23:17:13.323206 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 05 23:17:13 crc kubenswrapper[4910]: W0105 23:17:13.330644 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1106087_c536_4c6b_b28d_b8747492fa66.slice/crio-4439404d426a6f19fcbcd5add09e13e1da4ce6a71bcfd22511d7e0a2ba6347d2 WatchSource:0}: Error finding container 4439404d426a6f19fcbcd5add09e13e1da4ce6a71bcfd22511d7e0a2ba6347d2: Status 404 returned error can't find the container with id 4439404d426a6f19fcbcd5add09e13e1da4ce6a71bcfd22511d7e0a2ba6347d2 Jan 05 23:17:13 crc kubenswrapper[4910]: I0105 23:17:13.963643 4910 generic.go:334] "Generic (PLEG): container finished" podID="c1106087-c536-4c6b-b28d-b8747492fa66" containerID="51d83ea2b54372b0736ddc27ea1d88509b3cf17f1e4fe5c287c70d72e4a57d21" exitCode=0 Jan 05 23:17:13 crc kubenswrapper[4910]: I0105 23:17:13.963768 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"c1106087-c536-4c6b-b28d-b8747492fa66","Type":"ContainerDied","Data":"51d83ea2b54372b0736ddc27ea1d88509b3cf17f1e4fe5c287c70d72e4a57d21"} Jan 05 23:17:13 crc kubenswrapper[4910]: I0105 23:17:13.964108 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"c1106087-c536-4c6b-b28d-b8747492fa66","Type":"ContainerStarted","Data":"4439404d426a6f19fcbcd5add09e13e1da4ce6a71bcfd22511d7e0a2ba6347d2"} Jan 05 23:17:15 crc kubenswrapper[4910]: I0105 23:17:15.301168 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 05 23:17:15 crc kubenswrapper[4910]: I0105 23:17:15.321783 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_c1106087-c536-4c6b-b28d-b8747492fa66/mariadb-client/0.log" Jan 05 23:17:15 crc kubenswrapper[4910]: I0105 23:17:15.354220 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 05 23:17:15 crc kubenswrapper[4910]: I0105 23:17:15.362788 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Jan 05 23:17:15 crc kubenswrapper[4910]: I0105 23:17:15.429537 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sn5f4\" (UniqueName: \"kubernetes.io/projected/c1106087-c536-4c6b-b28d-b8747492fa66-kube-api-access-sn5f4\") pod \"c1106087-c536-4c6b-b28d-b8747492fa66\" (UID: \"c1106087-c536-4c6b-b28d-b8747492fa66\") " Jan 05 23:17:15 crc kubenswrapper[4910]: I0105 23:17:15.439069 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1106087-c536-4c6b-b28d-b8747492fa66-kube-api-access-sn5f4" (OuterVolumeSpecName: "kube-api-access-sn5f4") pod "c1106087-c536-4c6b-b28d-b8747492fa66" (UID: "c1106087-c536-4c6b-b28d-b8747492fa66"). InnerVolumeSpecName "kube-api-access-sn5f4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:17:15 crc kubenswrapper[4910]: I0105 23:17:15.532728 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sn5f4\" (UniqueName: \"kubernetes.io/projected/c1106087-c536-4c6b-b28d-b8747492fa66-kube-api-access-sn5f4\") on node \"crc\" DevicePath \"\"" Jan 05 23:17:15 crc kubenswrapper[4910]: I0105 23:17:15.983501 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4439404d426a6f19fcbcd5add09e13e1da4ce6a71bcfd22511d7e0a2ba6347d2" Jan 05 23:17:15 crc kubenswrapper[4910]: I0105 23:17:15.983588 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 05 23:17:16 crc kubenswrapper[4910]: I0105 23:17:16.745077 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1106087-c536-4c6b-b28d-b8747492fa66" path="/var/lib/kubelet/pods/c1106087-c536-4c6b-b28d-b8747492fa66/volumes" Jan 05 23:17:48 crc kubenswrapper[4910]: I0105 23:17:48.924230 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 05 23:17:48 crc kubenswrapper[4910]: E0105 23:17:48.925836 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1106087-c536-4c6b-b28d-b8747492fa66" containerName="mariadb-client" Jan 05 23:17:48 crc kubenswrapper[4910]: I0105 23:17:48.925873 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1106087-c536-4c6b-b28d-b8747492fa66" containerName="mariadb-client" Jan 05 23:17:48 crc kubenswrapper[4910]: I0105 23:17:48.926224 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1106087-c536-4c6b-b28d-b8747492fa66" containerName="mariadb-client" Jan 05 23:17:48 crc kubenswrapper[4910]: I0105 23:17:48.927870 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:48 crc kubenswrapper[4910]: I0105 23:17:48.948200 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-1"] Jan 05 23:17:48 crc kubenswrapper[4910]: I0105 23:17:48.949998 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 05 23:17:48 crc kubenswrapper[4910]: I0105 23:17:48.950202 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:48 crc kubenswrapper[4910]: I0105 23:17:48.970084 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 05 23:17:48 crc kubenswrapper[4910]: I0105 23:17:48.972600 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 05 23:17:48 crc kubenswrapper[4910]: I0105 23:17:48.972845 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-f2dr7" Jan 05 23:17:48 crc kubenswrapper[4910]: I0105 23:17:48.990271 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 05 23:17:48 crc kubenswrapper[4910]: I0105 23:17:48.993451 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-2" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.000636 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.018497 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.076346 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-config\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.076429 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-496d9cad-db35-4d37-a2dc-63e8d2c33c29\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-496d9cad-db35-4d37-a2dc-63e8d2c33c29\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.076487 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ktwr\" (UniqueName: \"kubernetes.io/projected/591431b6-fd67-4c89-ade6-029bd9e33d62-kube-api-access-5ktwr\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.076540 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.076694 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/591431b6-fd67-4c89-ade6-029bd9e33d62-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.076726 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/591431b6-fd67-4c89-ade6-029bd9e33d62-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.076757 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-502268bd-a16e-4893-8d84-7762a4e6237f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-502268bd-a16e-4893-8d84-7762a4e6237f\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.076821 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/591431b6-fd67-4c89-ade6-029bd9e33d62-config\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 
23:17:49.076858 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hzqn\" (UniqueName: \"kubernetes.io/projected/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-kube-api-access-6hzqn\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.076895 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/591431b6-fd67-4c89-ade6-029bd9e33d62-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.076960 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.076982 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.110631 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.112693 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.115568 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-x6qml" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.120138 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.120289 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.122218 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.146166 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.147817 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.159943 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.161749 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-2" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.168148 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178409 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/455e6927-176e-4136-aeb8-17cebb8f16a6-config\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178489 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178518 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178551 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-config\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178590 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-496d9cad-db35-4d37-a2dc-63e8d2c33c29\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-496d9cad-db35-4d37-a2dc-63e8d2c33c29\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178632 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ktwr\" (UniqueName: \"kubernetes.io/projected/591431b6-fd67-4c89-ade6-029bd9e33d62-kube-api-access-5ktwr\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178688 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/455e6927-176e-4136-aeb8-17cebb8f16a6-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178716 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/455e6927-176e-4136-aeb8-17cebb8f16a6-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178745 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 
23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178770 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c231ddc5-d255-4684-873a-b34fd98a2da5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c231ddc5-d255-4684-873a-b34fd98a2da5\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178805 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/455e6927-176e-4136-aeb8-17cebb8f16a6-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178833 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggn5k\" (UniqueName: \"kubernetes.io/projected/455e6927-176e-4136-aeb8-17cebb8f16a6-kube-api-access-ggn5k\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178867 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/591431b6-fd67-4c89-ade6-029bd9e33d62-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178891 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/591431b6-fd67-4c89-ade6-029bd9e33d62-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.178926 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-502268bd-a16e-4893-8d84-7762a4e6237f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-502268bd-a16e-4893-8d84-7762a4e6237f\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.179586 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.180870 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.181786 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/591431b6-fd67-4c89-ade6-029bd9e33d62-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.182532 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-config\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.182625 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/591431b6-fd67-4c89-ade6-029bd9e33d62-config\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.183360 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/591431b6-fd67-4c89-ade6-029bd9e33d62-config\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.183424 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hzqn\" (UniqueName: \"kubernetes.io/projected/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-kube-api-access-6hzqn\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.183504 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/591431b6-fd67-4c89-ade6-029bd9e33d62-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.183882 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/591431b6-fd67-4c89-ade6-029bd9e33d62-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.184051 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.187189 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.187222 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-502268bd-a16e-4893-8d84-7762a4e6237f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-502268bd-a16e-4893-8d84-7762a4e6237f\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/4453b0a1d33fd232599f611c5a63a61a498e2ea1fc69cb4ad39e5f42674bd57f/globalmount\"" pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.187753 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.187861 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-496d9cad-db35-4d37-a2dc-63e8d2c33c29\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-496d9cad-db35-4d37-a2dc-63e8d2c33c29\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6d717229b3e694a2c3f8134f26d91c88044aa12088e68d33af149136968be2fe/globalmount\"" pod="openstack/ovsdbserver-nb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.189999 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.190195 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/591431b6-fd67-4c89-ade6-029bd9e33d62-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.206055 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ktwr\" (UniqueName: \"kubernetes.io/projected/591431b6-fd67-4c89-ade6-029bd9e33d62-kube-api-access-5ktwr\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.211043 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hzqn\" (UniqueName: \"kubernetes.io/projected/2c4bebcb-a217-4298-a65a-bc6bc3e22a12-kube-api-access-6hzqn\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.234305 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-502268bd-a16e-4893-8d84-7762a4e6237f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-502268bd-a16e-4893-8d84-7762a4e6237f\") pod \"ovsdbserver-nb-1\" (UID: \"591431b6-fd67-4c89-ade6-029bd9e33d62\") " pod="openstack/ovsdbserver-nb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.235551 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-496d9cad-db35-4d37-a2dc-63e8d2c33c29\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-496d9cad-db35-4d37-a2dc-63e8d2c33c29\") pod \"ovsdbserver-nb-0\" (UID: \"2c4bebcb-a217-4298-a65a-bc6bc3e22a12\") " pod="openstack/ovsdbserver-nb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.285675 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8bfe3e37-97ea-4862-999c-f69d07d030e2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8bfe3e37-97ea-4862-999c-f69d07d030e2\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.285750 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4q4w\" (UniqueName: \"kubernetes.io/projected/18d58720-d6ec-455f-81be-b70f02d66b95-kube-api-access-d4q4w\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.285817 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88b3afe6-1d81-45e3-bf42-2bda83b89872-config\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.285865 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/88b3afe6-1d81-45e3-bf42-2bda83b89872-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.285898 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88b3afe6-1d81-45e3-bf42-2bda83b89872-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.285929 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/455e6927-176e-4136-aeb8-17cebb8f16a6-config\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.285960 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18d58720-d6ec-455f-81be-b70f02d66b95-config\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.285985 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lssjd\" (UniqueName: \"kubernetes.io/projected/88b3afe6-1d81-45e3-bf42-2bda83b89872-kube-api-access-lssjd\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286011 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18d58720-d6ec-455f-81be-b70f02d66b95-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286043 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-723907c9-9381-41f5-b5ba-6e78f0e0b47c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-723907c9-9381-41f5-b5ba-6e78f0e0b47c\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286067 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5f7c71a9-62c5-45fa-ae02-416a77a410d3-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286092 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/88b3afe6-1d81-45e3-bf42-2bda83b89872-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286159 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f7c71a9-62c5-45fa-ae02-416a77a410d3-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286205 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/455e6927-176e-4136-aeb8-17cebb8f16a6-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286232 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/455e6927-176e-4136-aeb8-17cebb8f16a6-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286259 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/18d58720-d6ec-455f-81be-b70f02d66b95-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286285 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/18d58720-d6ec-455f-81be-b70f02d66b95-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286309 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9pz6\" (UniqueName: \"kubernetes.io/projected/5f7c71a9-62c5-45fa-ae02-416a77a410d3-kube-api-access-z9pz6\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286337 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c231ddc5-d255-4684-873a-b34fd98a2da5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c231ddc5-d255-4684-873a-b34fd98a2da5\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286360 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b8fadd32-c502-4ba4-b4fb-c8fdc3bb1bcc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b8fadd32-c502-4ba4-b4fb-c8fdc3bb1bcc\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286391 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/455e6927-176e-4136-aeb8-17cebb8f16a6-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286425 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggn5k\" (UniqueName: \"kubernetes.io/projected/455e6927-176e-4136-aeb8-17cebb8f16a6-kube-api-access-ggn5k\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286459 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7c71a9-62c5-45fa-ae02-416a77a410d3-config\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.286481 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5f7c71a9-62c5-45fa-ae02-416a77a410d3-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.287836 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/455e6927-176e-4136-aeb8-17cebb8f16a6-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.288074 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/455e6927-176e-4136-aeb8-17cebb8f16a6-config\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.288228 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/455e6927-176e-4136-aeb8-17cebb8f16a6-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.290954 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.290993 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c231ddc5-d255-4684-873a-b34fd98a2da5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c231ddc5-d255-4684-873a-b34fd98a2da5\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/5b5a497ad059be340b2fd110fe24cb558dab71622dc34b5aaa6664df30a3351d/globalmount\"" pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.291029 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/455e6927-176e-4136-aeb8-17cebb8f16a6-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.303765 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.306633 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggn5k\" (UniqueName: \"kubernetes.io/projected/455e6927-176e-4136-aeb8-17cebb8f16a6-kube-api-access-ggn5k\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.323596 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.331762 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c231ddc5-d255-4684-873a-b34fd98a2da5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c231ddc5-d255-4684-873a-b34fd98a2da5\") pod \"ovsdbserver-nb-2\" (UID: \"455e6927-176e-4136-aeb8-17cebb8f16a6\") " pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388227 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88b3afe6-1d81-45e3-bf42-2bda83b89872-config\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388307 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/88b3afe6-1d81-45e3-bf42-2bda83b89872-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388357 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88b3afe6-1d81-45e3-bf42-2bda83b89872-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388386 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18d58720-d6ec-455f-81be-b70f02d66b95-config\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388413 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lssjd\" (UniqueName: \"kubernetes.io/projected/88b3afe6-1d81-45e3-bf42-2bda83b89872-kube-api-access-lssjd\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388453 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18d58720-d6ec-455f-81be-b70f02d66b95-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388479 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5f7c71a9-62c5-45fa-ae02-416a77a410d3-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388529 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-723907c9-9381-41f5-b5ba-6e78f0e0b47c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-723907c9-9381-41f5-b5ba-6e78f0e0b47c\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388563 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/88b3afe6-1d81-45e3-bf42-2bda83b89872-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388626 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f7c71a9-62c5-45fa-ae02-416a77a410d3-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388699 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/18d58720-d6ec-455f-81be-b70f02d66b95-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388724 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/18d58720-d6ec-455f-81be-b70f02d66b95-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388780 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9pz6\" (UniqueName: \"kubernetes.io/projected/5f7c71a9-62c5-45fa-ae02-416a77a410d3-kube-api-access-z9pz6\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388807 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b8fadd32-c502-4ba4-b4fb-c8fdc3bb1bcc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b8fadd32-c502-4ba4-b4fb-c8fdc3bb1bcc\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388896 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7c71a9-62c5-45fa-ae02-416a77a410d3-config\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.388944 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5f7c71a9-62c5-45fa-ae02-416a77a410d3-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.389001 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8bfe3e37-97ea-4862-999c-f69d07d030e2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8bfe3e37-97ea-4862-999c-f69d07d030e2\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.389030 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4q4w\" (UniqueName: \"kubernetes.io/projected/18d58720-d6ec-455f-81be-b70f02d66b95-kube-api-access-d4q4w\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.389518 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88b3afe6-1d81-45e3-bf42-2bda83b89872-config\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.390179 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/88b3afe6-1d81-45e3-bf42-2bda83b89872-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.391172 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5f7c71a9-62c5-45fa-ae02-416a77a410d3-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.391461 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/18d58720-d6ec-455f-81be-b70f02d66b95-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.391569 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7c71a9-62c5-45fa-ae02-416a77a410d3-config\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.392776 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/88b3afe6-1d81-45e3-bf42-2bda83b89872-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.392851 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18d58720-d6ec-455f-81be-b70f02d66b95-config\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.393193 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/18d58720-d6ec-455f-81be-b70f02d66b95-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.394393 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.394420 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-723907c9-9381-41f5-b5ba-6e78f0e0b47c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-723907c9-9381-41f5-b5ba-6e78f0e0b47c\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b49919bdfdfceefc80af95d74003e8db8d464e0752d6795be519d956740cbf89/globalmount\"" pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.394883 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.394914 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8bfe3e37-97ea-4862-999c-f69d07d030e2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8bfe3e37-97ea-4862-999c-f69d07d030e2\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/74b24fd87cfbba46d38a9df46144d7f6c6fd34318ac4a3814888f3f73921f7fe/globalmount\"" pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.395019 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.395040 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b8fadd32-c502-4ba4-b4fb-c8fdc3bb1bcc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b8fadd32-c502-4ba4-b4fb-c8fdc3bb1bcc\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9acc215103e4d54529bd0fc9d5d6c6f3f180eb24a4b64c7a0fd2b0ea57ac4c35/globalmount\"" pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.395377 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f7c71a9-62c5-45fa-ae02-416a77a410d3-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.395536 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5f7c71a9-62c5-45fa-ae02-416a77a410d3-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.396693 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18d58720-d6ec-455f-81be-b70f02d66b95-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.397963 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88b3afe6-1d81-45e3-bf42-2bda83b89872-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.422140 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lssjd\" (UniqueName: \"kubernetes.io/projected/88b3afe6-1d81-45e3-bf42-2bda83b89872-kube-api-access-lssjd\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.424517 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4q4w\" (UniqueName: \"kubernetes.io/projected/18d58720-d6ec-455f-81be-b70f02d66b95-kube-api-access-d4q4w\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.436546 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9pz6\" (UniqueName: \"kubernetes.io/projected/5f7c71a9-62c5-45fa-ae02-416a77a410d3-kube-api-access-z9pz6\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.453653 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8bfe3e37-97ea-4862-999c-f69d07d030e2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8bfe3e37-97ea-4862-999c-f69d07d030e2\") pod \"ovsdbserver-sb-1\" (UID: \"18d58720-d6ec-455f-81be-b70f02d66b95\") " pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.456410 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b8fadd32-c502-4ba4-b4fb-c8fdc3bb1bcc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b8fadd32-c502-4ba4-b4fb-c8fdc3bb1bcc\") pod \"ovsdbserver-sb-0\" (UID: \"5f7c71a9-62c5-45fa-ae02-416a77a410d3\") " pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.463507 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-723907c9-9381-41f5-b5ba-6e78f0e0b47c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-723907c9-9381-41f5-b5ba-6e78f0e0b47c\") pod \"ovsdbserver-sb-2\" (UID: \"88b3afe6-1d81-45e3-bf42-2bda83b89872\") " pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.466496 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.486786 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.633213 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.734244 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.879234 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"]
Jan 05 23:17:49 crc kubenswrapper[4910]: I0105 23:17:49.980957 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Jan 05 23:17:49 crc kubenswrapper[4910]: W0105 23:17:49.995820 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c4bebcb_a217_4298_a65a_bc6bc3e22a12.slice/crio-b02c49474cff509c9ad84590f7d8183f17b2996999ba5955722a8007d6109ecd WatchSource:0}: Error finding container b02c49474cff509c9ad84590f7d8183f17b2996999ba5955722a8007d6109ecd: Status 404 returned error can't find the container with id b02c49474cff509c9ad84590f7d8183f17b2996999ba5955722a8007d6109ecd
Jan 05 23:17:50 crc kubenswrapper[4910]: I0105 23:17:50.086195 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"]
Jan 05 23:17:50 crc kubenswrapper[4910]: I0105 23:17:50.269043 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Jan 05 23:17:50 crc kubenswrapper[4910]: W0105 23:17:50.275511 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5f7c71a9_62c5_45fa_ae02_416a77a410d3.slice/crio-206ae43c0071a34c0baf16a0ac76970d58f2bc7e0ad90ad1b166b257d6e3d5d3 WatchSource:0}: Error finding container 206ae43c0071a34c0baf16a0ac76970d58f2bc7e0ad90ad1b166b257d6e3d5d3: Status 404 returned error can't find the container with id 206ae43c0071a34c0baf16a0ac76970d58f2bc7e0ad90ad1b166b257d6e3d5d3
Jan 05 23:17:50 crc kubenswrapper[4910]: I0105 23:17:50.347926 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"5f7c71a9-62c5-45fa-ae02-416a77a410d3","Type":"ContainerStarted","Data":"206ae43c0071a34c0baf16a0ac76970d58f2bc7e0ad90ad1b166b257d6e3d5d3"}
Jan 05 23:17:50 crc kubenswrapper[4910]: I0105 23:17:50.350011 4910 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"88b3afe6-1d81-45e3-bf42-2bda83b89872","Type":"ContainerStarted","Data":"3f1a221295c2da50eec7c76066a47ff5cfc96792f13295213f1c1ee76011de19"} Jan 05 23:17:50 crc kubenswrapper[4910]: I0105 23:17:50.354262 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"591431b6-fd67-4c89-ade6-029bd9e33d62","Type":"ContainerStarted","Data":"3a0a185c0a2d72a8de7a5280cf29250f5f842f0c2217c1630db03a2c2e232bc3"} Jan 05 23:17:50 crc kubenswrapper[4910]: I0105 23:17:50.354323 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"591431b6-fd67-4c89-ade6-029bd9e33d62","Type":"ContainerStarted","Data":"f4edaa20304aa8903682e884ef079632487ba47bb03789dd86ca731877eac817"} Jan 05 23:17:50 crc kubenswrapper[4910]: I0105 23:17:50.354338 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"591431b6-fd67-4c89-ade6-029bd9e33d62","Type":"ContainerStarted","Data":"5bbfe7e702584dfb427be756fb441bc47bb0af5e5efecda2bc53077147f4f9ca"} Jan 05 23:17:50 crc kubenswrapper[4910]: I0105 23:17:50.356729 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"2c4bebcb-a217-4298-a65a-bc6bc3e22a12","Type":"ContainerStarted","Data":"1de03fefb2d419457b460927a9c853e799370e38eaabec71c3dae96302a5d9b7"} Jan 05 23:17:50 crc kubenswrapper[4910]: I0105 23:17:50.356770 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"2c4bebcb-a217-4298-a65a-bc6bc3e22a12","Type":"ContainerStarted","Data":"b02c49474cff509c9ad84590f7d8183f17b2996999ba5955722a8007d6109ecd"} Jan 05 23:17:50 crc kubenswrapper[4910]: I0105 23:17:50.380280 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-1" podStartSLOduration=3.380247048 podStartE2EDuration="3.380247048s" podCreationTimestamp="2026-01-05 23:17:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:17:50.372950228 +0000 UTC m=+5201.950447898" watchObservedRunningTime="2026-01-05 23:17:50.380247048 +0000 UTC m=+5201.957744718" Jan 05 23:17:50 crc kubenswrapper[4910]: I0105 23:17:50.757363 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 05 23:17:50 crc kubenswrapper[4910]: W0105 23:17:50.765573 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18d58720_d6ec_455f_81be_b70f02d66b95.slice/crio-b3416519c66b713c57368faee9b2e4711d5a0734360a345c1a1051c4ee1738a9 WatchSource:0}: Error finding container b3416519c66b713c57368faee9b2e4711d5a0734360a345c1a1051c4ee1738a9: Status 404 returned error can't find the container with id b3416519c66b713c57368faee9b2e4711d5a0734360a345c1a1051c4ee1738a9 Jan 05 23:17:51 crc kubenswrapper[4910]: W0105 23:17:51.165269 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod455e6927_176e_4136_aeb8_17cebb8f16a6.slice/crio-ce34d4b445c9d8ff62c386a1970fed95984139f2d1a9f149152f933a94fd6ddd WatchSource:0}: Error finding container ce34d4b445c9d8ff62c386a1970fed95984139f2d1a9f149152f933a94fd6ddd: Status 404 returned error can't find the container with id ce34d4b445c9d8ff62c386a1970fed95984139f2d1a9f149152f933a94fd6ddd Jan 05 23:17:51 crc 
kubenswrapper[4910]: I0105 23:17:51.165623 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.374083 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"2c4bebcb-a217-4298-a65a-bc6bc3e22a12","Type":"ContainerStarted","Data":"c9d513a416b3fa8d061814e3e7bda12fe2e064b354def5b99325df343c8136e2"} Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.379879 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"18d58720-d6ec-455f-81be-b70f02d66b95","Type":"ContainerStarted","Data":"1bb14123846443eb1f38f0cb79bed4e0f26771d029549bd297898b8633ec3834"} Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.379931 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"18d58720-d6ec-455f-81be-b70f02d66b95","Type":"ContainerStarted","Data":"cc19b337c1a772ef798418f40ca8a40ce12aba8418dcffaf71adff9b56dea870"} Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.379945 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"18d58720-d6ec-455f-81be-b70f02d66b95","Type":"ContainerStarted","Data":"b3416519c66b713c57368faee9b2e4711d5a0734360a345c1a1051c4ee1738a9"} Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.382449 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"5f7c71a9-62c5-45fa-ae02-416a77a410d3","Type":"ContainerStarted","Data":"31fd4f53413b4cd345ab5e159b1997288faf9a3ab83626035f97b5b435e46ae9"} Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.382509 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"5f7c71a9-62c5-45fa-ae02-416a77a410d3","Type":"ContainerStarted","Data":"a4d6ff5626d3c22e31b6475812608ce762bf1b49758582e14c023a7d7b8f78b7"} Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.385466 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"88b3afe6-1d81-45e3-bf42-2bda83b89872","Type":"ContainerStarted","Data":"6862d747185d63e4c6d23f29a279e6d1c098733daf036c4ca4338b264ab197f4"} Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.385510 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"88b3afe6-1d81-45e3-bf42-2bda83b89872","Type":"ContainerStarted","Data":"2b5d730d95ac269c1bbb2a2714af26f80232d40aac3284fc1f158641d265c28f"} Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.388791 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"455e6927-176e-4136-aeb8-17cebb8f16a6","Type":"ContainerStarted","Data":"31cc8b05c8e481d3602077e4e42ca2e9f91c45ceebf3adcc9e3bdd505f9984b5"} Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.388841 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"455e6927-176e-4136-aeb8-17cebb8f16a6","Type":"ContainerStarted","Data":"ce34d4b445c9d8ff62c386a1970fed95984139f2d1a9f149152f933a94fd6ddd"} Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.435132 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-1" podStartSLOduration=3.435084245 podStartE2EDuration="3.435084245s" podCreationTimestamp="2026-01-05 23:17:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-05 23:17:51.419874088 +0000 UTC m=+5202.997371758" watchObservedRunningTime="2026-01-05 23:17:51.435084245 +0000 UTC m=+5203.012581915" Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.436295 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=4.436289265 podStartE2EDuration="4.436289265s" podCreationTimestamp="2026-01-05 23:17:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:17:51.401512653 +0000 UTC m=+5202.979010333" watchObservedRunningTime="2026-01-05 23:17:51.436289265 +0000 UTC m=+5203.013786925" Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.448719 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=3.448680231 podStartE2EDuration="3.448680231s" podCreationTimestamp="2026-01-05 23:17:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:17:51.448395744 +0000 UTC m=+5203.025893414" watchObservedRunningTime="2026-01-05 23:17:51.448680231 +0000 UTC m=+5203.026177901" Jan 05 23:17:51 crc kubenswrapper[4910]: I0105 23:17:51.478114 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-2" podStartSLOduration=3.4780859299999998 podStartE2EDuration="3.47808593s" podCreationTimestamp="2026-01-05 23:17:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:17:51.466903193 +0000 UTC m=+5203.044400873" watchObservedRunningTime="2026-01-05 23:17:51.47808593 +0000 UTC m=+5203.055583600" Jan 05 23:17:52 crc kubenswrapper[4910]: I0105 23:17:52.304514 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:52 crc kubenswrapper[4910]: I0105 23:17:52.324441 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:52 crc kubenswrapper[4910]: I0105 23:17:52.405829 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"455e6927-176e-4136-aeb8-17cebb8f16a6","Type":"ContainerStarted","Data":"eed77045eaaffc69641bd22391c1cceae519142ca7ce054b4a3ee487a432b60f"} Jan 05 23:17:52 crc kubenswrapper[4910]: I0105 23:17:52.444251 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-2" podStartSLOduration=5.444205389 podStartE2EDuration="5.444205389s" podCreationTimestamp="2026-01-05 23:17:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:17:52.439035841 +0000 UTC m=+5204.016533551" watchObservedRunningTime="2026-01-05 23:17:52.444205389 +0000 UTC m=+5204.021703099" Jan 05 23:17:52 crc kubenswrapper[4910]: I0105 23:17:52.467197 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-1" Jan 05 23:17:52 crc kubenswrapper[4910]: I0105 23:17:52.488305 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-2" Jan 05 23:17:52 crc kubenswrapper[4910]: I0105 23:17:52.633366 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/ovsdbserver-nb-2" Jan 05 23:17:52 crc kubenswrapper[4910]: I0105 23:17:52.738191 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 05 23:17:54 crc kubenswrapper[4910]: I0105 23:17:54.304270 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:54 crc kubenswrapper[4910]: I0105 23:17:54.324481 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:54 crc kubenswrapper[4910]: I0105 23:17:54.467417 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-1" Jan 05 23:17:54 crc kubenswrapper[4910]: I0105 23:17:54.487763 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-2" Jan 05 23:17:54 crc kubenswrapper[4910]: I0105 23:17:54.633670 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-2" Jan 05 23:17:54 crc kubenswrapper[4910]: I0105 23:17:54.737235 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.377627 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.399939 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.460991 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-1" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.494024 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.544456 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-1" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.553204 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-2" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.615938 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-2" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.628047 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-1" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.687550 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-2" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.704686 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f8b57784f-wq4cl"] Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.712846 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.717748 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.730441 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f8b57784f-wq4cl"] Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.736791 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-config\") pod \"dnsmasq-dns-f8b57784f-wq4cl\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.736839 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-dns-svc\") pod \"dnsmasq-dns-f8b57784f-wq4cl\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.736866 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-ovsdbserver-nb\") pod \"dnsmasq-dns-f8b57784f-wq4cl\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.736984 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc868\" (UniqueName: \"kubernetes.io/projected/1e8de6e1-a898-42b6-82b4-8896b1b068d7-kube-api-access-vc868\") pod \"dnsmasq-dns-f8b57784f-wq4cl\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.777038 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.826196 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.869584 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc868\" (UniqueName: \"kubernetes.io/projected/1e8de6e1-a898-42b6-82b4-8896b1b068d7-kube-api-access-vc868\") pod \"dnsmasq-dns-f8b57784f-wq4cl\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.869786 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-config\") pod \"dnsmasq-dns-f8b57784f-wq4cl\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.869840 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-dns-svc\") pod \"dnsmasq-dns-f8b57784f-wq4cl\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.869917 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-ovsdbserver-nb\") pod \"dnsmasq-dns-f8b57784f-wq4cl\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.871075 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-ovsdbserver-nb\") pod \"dnsmasq-dns-f8b57784f-wq4cl\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.871753 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-config\") pod \"dnsmasq-dns-f8b57784f-wq4cl\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.872174 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-dns-svc\") pod \"dnsmasq-dns-f8b57784f-wq4cl\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.894689 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc868\" (UniqueName: \"kubernetes.io/projected/1e8de6e1-a898-42b6-82b4-8896b1b068d7-kube-api-access-vc868\") pod \"dnsmasq-dns-f8b57784f-wq4cl\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.992632 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f8b57784f-wq4cl"] Jan 05 23:17:55 crc kubenswrapper[4910]: I0105 23:17:55.994265 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.049769 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76554fcc87-lm25j"] Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.051296 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.053919 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.064427 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76554fcc87-lm25j"] Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.209782 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-ovsdbserver-nb\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.210419 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-config\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.210470 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j674n\" (UniqueName: \"kubernetes.io/projected/87645fe7-bba7-4b33-a47f-44949bb8e28b-kube-api-access-j674n\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.210545 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-ovsdbserver-sb\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.210609 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-dns-svc\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.312731 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-dns-svc\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.312821 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-ovsdbserver-nb\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.312852 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-config\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " 
pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.312887 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j674n\" (UniqueName: \"kubernetes.io/projected/87645fe7-bba7-4b33-a47f-44949bb8e28b-kube-api-access-j674n\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.312926 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-ovsdbserver-sb\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.313878 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-dns-svc\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.313973 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-ovsdbserver-nb\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.314202 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-config\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.314532 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-ovsdbserver-sb\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.355325 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j674n\" (UniqueName: \"kubernetes.io/projected/87645fe7-bba7-4b33-a47f-44949bb8e28b-kube-api-access-j674n\") pod \"dnsmasq-dns-76554fcc87-lm25j\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.433580 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.529884 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-2" Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.570835 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f8b57784f-wq4cl"] Jan 05 23:17:56 crc kubenswrapper[4910]: I0105 23:17:56.966092 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76554fcc87-lm25j"] Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.495029 4910 generic.go:334] "Generic (PLEG): container finished" podID="87645fe7-bba7-4b33-a47f-44949bb8e28b" containerID="0a1f7f1866185bb1c850cbd0623f0a8bad579a67af1c00140e96328d70323774" exitCode=0 Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.495196 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76554fcc87-lm25j" event={"ID":"87645fe7-bba7-4b33-a47f-44949bb8e28b","Type":"ContainerDied","Data":"0a1f7f1866185bb1c850cbd0623f0a8bad579a67af1c00140e96328d70323774"} Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.495715 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76554fcc87-lm25j" event={"ID":"87645fe7-bba7-4b33-a47f-44949bb8e28b","Type":"ContainerStarted","Data":"a71a687af714fef4bccf5885682f5a2a6ab6bdebf16dddf19497ac3e998a6745"} Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.497585 4910 generic.go:334] "Generic (PLEG): container finished" podID="1e8de6e1-a898-42b6-82b4-8896b1b068d7" containerID="f346587a98db0aadd323d0a6bb6fd7f806fb13a5cd5674d012b05dd8ff19d3bd" exitCode=0 Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.497678 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" event={"ID":"1e8de6e1-a898-42b6-82b4-8896b1b068d7","Type":"ContainerDied","Data":"f346587a98db0aadd323d0a6bb6fd7f806fb13a5cd5674d012b05dd8ff19d3bd"} Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.497782 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" event={"ID":"1e8de6e1-a898-42b6-82b4-8896b1b068d7","Type":"ContainerStarted","Data":"7304ce39029dbc7ef416f4a51b9a7c4d57f056c8cad216767d54d0495edad221"} Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.853177 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.942658 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-ovsdbserver-nb\") pod \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.942850 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-config\") pod \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.942971 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vc868\" (UniqueName: \"kubernetes.io/projected/1e8de6e1-a898-42b6-82b4-8896b1b068d7-kube-api-access-vc868\") pod \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.943163 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-dns-svc\") pod \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\" (UID: \"1e8de6e1-a898-42b6-82b4-8896b1b068d7\") " Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.949450 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e8de6e1-a898-42b6-82b4-8896b1b068d7-kube-api-access-vc868" (OuterVolumeSpecName: "kube-api-access-vc868") pod "1e8de6e1-a898-42b6-82b4-8896b1b068d7" (UID: "1e8de6e1-a898-42b6-82b4-8896b1b068d7"). InnerVolumeSpecName "kube-api-access-vc868". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.968974 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1e8de6e1-a898-42b6-82b4-8896b1b068d7" (UID: "1e8de6e1-a898-42b6-82b4-8896b1b068d7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.981032 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-config" (OuterVolumeSpecName: "config") pod "1e8de6e1-a898-42b6-82b4-8896b1b068d7" (UID: "1e8de6e1-a898-42b6-82b4-8896b1b068d7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:17:57 crc kubenswrapper[4910]: I0105 23:17:57.985260 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1e8de6e1-a898-42b6-82b4-8896b1b068d7" (UID: "1e8de6e1-a898-42b6-82b4-8896b1b068d7"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:17:58 crc kubenswrapper[4910]: I0105 23:17:58.045419 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:17:58 crc kubenswrapper[4910]: I0105 23:17:58.045467 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vc868\" (UniqueName: \"kubernetes.io/projected/1e8de6e1-a898-42b6-82b4-8896b1b068d7-kube-api-access-vc868\") on node \"crc\" DevicePath \"\"" Jan 05 23:17:58 crc kubenswrapper[4910]: I0105 23:17:58.045481 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:17:58 crc kubenswrapper[4910]: I0105 23:17:58.045494 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1e8de6e1-a898-42b6-82b4-8896b1b068d7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 23:17:58 crc kubenswrapper[4910]: I0105 23:17:58.514440 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" event={"ID":"1e8de6e1-a898-42b6-82b4-8896b1b068d7","Type":"ContainerDied","Data":"7304ce39029dbc7ef416f4a51b9a7c4d57f056c8cad216767d54d0495edad221"} Jan 05 23:17:58 crc kubenswrapper[4910]: I0105 23:17:58.515104 4910 scope.go:117] "RemoveContainer" containerID="f346587a98db0aadd323d0a6bb6fd7f806fb13a5cd5674d012b05dd8ff19d3bd" Jan 05 23:17:58 crc kubenswrapper[4910]: I0105 23:17:58.515377 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f8b57784f-wq4cl" Jan 05 23:17:58 crc kubenswrapper[4910]: I0105 23:17:58.516839 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76554fcc87-lm25j" event={"ID":"87645fe7-bba7-4b33-a47f-44949bb8e28b","Type":"ContainerStarted","Data":"dfca0a76a4c0728a569ed4f7f0d64a2d878e53597d2a6aa9e3872c25b5573806"} Jan 05 23:17:58 crc kubenswrapper[4910]: I0105 23:17:58.522348 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:17:58 crc kubenswrapper[4910]: I0105 23:17:58.614210 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-76554fcc87-lm25j" podStartSLOduration=3.614183875 podStartE2EDuration="3.614183875s" podCreationTimestamp="2026-01-05 23:17:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:17:58.565770656 +0000 UTC m=+5210.143268366" watchObservedRunningTime="2026-01-05 23:17:58.614183875 +0000 UTC m=+5210.191681545" Jan 05 23:17:58 crc kubenswrapper[4910]: I0105 23:17:58.633994 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f8b57784f-wq4cl"] Jan 05 23:17:58 crc kubenswrapper[4910]: I0105 23:17:58.642318 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f8b57784f-wq4cl"] Jan 05 23:17:58 crc kubenswrapper[4910]: I0105 23:17:58.735173 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e8de6e1-a898-42b6-82b4-8896b1b068d7" path="/var/lib/kubelet/pods/1e8de6e1-a898-42b6-82b4-8896b1b068d7/volumes" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.355574 4910 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/ovn-copy-data"] Jan 05 23:17:59 crc kubenswrapper[4910]: E0105 23:17:59.356702 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e8de6e1-a898-42b6-82b4-8896b1b068d7" containerName="init" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.356775 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e8de6e1-a898-42b6-82b4-8896b1b068d7" containerName="init" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.358282 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e8de6e1-a898-42b6-82b4-8896b1b068d7" containerName="init" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.359339 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.364787 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.366790 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.493172 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d8b234ea-22e6-4ade-a9c8-27976f7bc339\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d8b234ea-22e6-4ade-a9c8-27976f7bc339\") pod \"ovn-copy-data\" (UID: \"a9bfffd0-f255-43e2-8c45-bf4ce76358ff\") " pod="openstack/ovn-copy-data" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.493276 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvqtl\" (UniqueName: \"kubernetes.io/projected/a9bfffd0-f255-43e2-8c45-bf4ce76358ff-kube-api-access-mvqtl\") pod \"ovn-copy-data\" (UID: \"a9bfffd0-f255-43e2-8c45-bf4ce76358ff\") " pod="openstack/ovn-copy-data" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.493309 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/a9bfffd0-f255-43e2-8c45-bf4ce76358ff-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"a9bfffd0-f255-43e2-8c45-bf4ce76358ff\") " pod="openstack/ovn-copy-data" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.595684 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d8b234ea-22e6-4ade-a9c8-27976f7bc339\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d8b234ea-22e6-4ade-a9c8-27976f7bc339\") pod \"ovn-copy-data\" (UID: \"a9bfffd0-f255-43e2-8c45-bf4ce76358ff\") " pod="openstack/ovn-copy-data" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.595863 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvqtl\" (UniqueName: \"kubernetes.io/projected/a9bfffd0-f255-43e2-8c45-bf4ce76358ff-kube-api-access-mvqtl\") pod \"ovn-copy-data\" (UID: \"a9bfffd0-f255-43e2-8c45-bf4ce76358ff\") " pod="openstack/ovn-copy-data" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.595910 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/a9bfffd0-f255-43e2-8c45-bf4ce76358ff-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"a9bfffd0-f255-43e2-8c45-bf4ce76358ff\") " pod="openstack/ovn-copy-data" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.603511 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not 
set. Skipping MountDevice... Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.603584 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d8b234ea-22e6-4ade-a9c8-27976f7bc339\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d8b234ea-22e6-4ade-a9c8-27976f7bc339\") pod \"ovn-copy-data\" (UID: \"a9bfffd0-f255-43e2-8c45-bf4ce76358ff\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/373556a8bff8f5c4c6b98eb5724611d1a4c09c873e166e281b7dd2cc4fd62e81/globalmount\"" pod="openstack/ovn-copy-data" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.611724 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/a9bfffd0-f255-43e2-8c45-bf4ce76358ff-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"a9bfffd0-f255-43e2-8c45-bf4ce76358ff\") " pod="openstack/ovn-copy-data" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.639548 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvqtl\" (UniqueName: \"kubernetes.io/projected/a9bfffd0-f255-43e2-8c45-bf4ce76358ff-kube-api-access-mvqtl\") pod \"ovn-copy-data\" (UID: \"a9bfffd0-f255-43e2-8c45-bf4ce76358ff\") " pod="openstack/ovn-copy-data" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.668955 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d8b234ea-22e6-4ade-a9c8-27976f7bc339\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d8b234ea-22e6-4ade-a9c8-27976f7bc339\") pod \"ovn-copy-data\" (UID: \"a9bfffd0-f255-43e2-8c45-bf4ce76358ff\") " pod="openstack/ovn-copy-data" Jan 05 23:17:59 crc kubenswrapper[4910]: I0105 23:17:59.701749 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Jan 05 23:18:00 crc kubenswrapper[4910]: I0105 23:18:00.335787 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Jan 05 23:18:00 crc kubenswrapper[4910]: I0105 23:18:00.350638 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 23:18:00 crc kubenswrapper[4910]: I0105 23:18:00.545082 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"a9bfffd0-f255-43e2-8c45-bf4ce76358ff","Type":"ContainerStarted","Data":"958bf634f59c415714886e6d671c375db4a70b2f2fa3889fd2e0e034b9fa06c9"} Jan 05 23:18:01 crc kubenswrapper[4910]: I0105 23:18:01.567326 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"a9bfffd0-f255-43e2-8c45-bf4ce76358ff","Type":"ContainerStarted","Data":"e6b44ed6ebab1ebec21f4aa2223abbe0906c7295e224bba2123ef8cf3bfb3251"} Jan 05 23:18:01 crc kubenswrapper[4910]: I0105 23:18:01.594663 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-copy-data" podStartSLOduration=3.11115187 podStartE2EDuration="3.594630994s" podCreationTimestamp="2026-01-05 23:17:58 +0000 UTC" firstStartedPulling="2026-01-05 23:18:00.350233974 +0000 UTC m=+5211.927731684" lastFinishedPulling="2026-01-05 23:18:00.833713098 +0000 UTC m=+5212.411210808" observedRunningTime="2026-01-05 23:18:01.592207974 +0000 UTC m=+5213.169705724" watchObservedRunningTime="2026-01-05 23:18:01.594630994 +0000 UTC m=+5213.172128694" Jan 05 23:18:05 crc kubenswrapper[4910]: E0105 23:18:05.146309 4910 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.166:51416->38.102.83.166:40365: write tcp 38.102.83.166:51416->38.102.83.166:40365: write: broken pipe Jan 05 23:18:06 crc kubenswrapper[4910]: I0105 23:18:06.437175 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:18:06 crc kubenswrapper[4910]: I0105 23:18:06.524565 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-9w2bk"] Jan 05 23:18:06 crc kubenswrapper[4910]: I0105 23:18:06.524966 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-699964fbc-9w2bk" podUID="8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9" containerName="dnsmasq-dns" containerID="cri-o://1eb072a223ea45ddd20773726c08a60e0e17f224849fe23f180e06bb599671c8" gracePeriod=10 Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.052821 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.199025 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmkqf\" (UniqueName: \"kubernetes.io/projected/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-kube-api-access-cmkqf\") pod \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\" (UID: \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\") " Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.199537 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-config\") pod \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\" (UID: \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\") " Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.199582 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-dns-svc\") pod \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\" (UID: \"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9\") " Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.217836 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-kube-api-access-cmkqf" (OuterVolumeSpecName: "kube-api-access-cmkqf") pod "8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9" (UID: "8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9"). InnerVolumeSpecName "kube-api-access-cmkqf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.255720 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9" (UID: "8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.275018 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-config" (OuterVolumeSpecName: "config") pod "8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9" (UID: "8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.302829 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.302875 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.302935 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmkqf\" (UniqueName: \"kubernetes.io/projected/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9-kube-api-access-cmkqf\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.636717 4910 generic.go:334] "Generic (PLEG): container finished" podID="8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9" containerID="1eb072a223ea45ddd20773726c08a60e0e17f224849fe23f180e06bb599671c8" exitCode=0 Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.636777 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-9w2bk" event={"ID":"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9","Type":"ContainerDied","Data":"1eb072a223ea45ddd20773726c08a60e0e17f224849fe23f180e06bb599671c8"} Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.636812 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-9w2bk" event={"ID":"8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9","Type":"ContainerDied","Data":"ca7300e229683eb8c1eca90f8dae76e94a5629e4f770872c0f5eb9ccb9adf26f"} Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.636818 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-9w2bk" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.636835 4910 scope.go:117] "RemoveContainer" containerID="1eb072a223ea45ddd20773726c08a60e0e17f224849fe23f180e06bb599671c8" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.727700 4910 scope.go:117] "RemoveContainer" containerID="93e4c55f9177b116015994dd22fa2a6c729c45fe087b2ff6b7d8b4c984512294" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.732537 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-9w2bk"] Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.746262 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-9w2bk"] Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.750493 4910 scope.go:117] "RemoveContainer" containerID="1eb072a223ea45ddd20773726c08a60e0e17f224849fe23f180e06bb599671c8" Jan 05 23:18:07 crc kubenswrapper[4910]: E0105 23:18:07.751317 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1eb072a223ea45ddd20773726c08a60e0e17f224849fe23f180e06bb599671c8\": container with ID starting with 1eb072a223ea45ddd20773726c08a60e0e17f224849fe23f180e06bb599671c8 not found: ID does not exist" containerID="1eb072a223ea45ddd20773726c08a60e0e17f224849fe23f180e06bb599671c8" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.751369 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1eb072a223ea45ddd20773726c08a60e0e17f224849fe23f180e06bb599671c8"} err="failed to get container status \"1eb072a223ea45ddd20773726c08a60e0e17f224849fe23f180e06bb599671c8\": rpc error: code = NotFound desc = could not find container \"1eb072a223ea45ddd20773726c08a60e0e17f224849fe23f180e06bb599671c8\": container with ID starting with 1eb072a223ea45ddd20773726c08a60e0e17f224849fe23f180e06bb599671c8 not found: ID does not exist" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.751403 4910 scope.go:117] "RemoveContainer" containerID="93e4c55f9177b116015994dd22fa2a6c729c45fe087b2ff6b7d8b4c984512294" Jan 05 23:18:07 crc kubenswrapper[4910]: E0105 23:18:07.751831 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93e4c55f9177b116015994dd22fa2a6c729c45fe087b2ff6b7d8b4c984512294\": container with ID starting with 93e4c55f9177b116015994dd22fa2a6c729c45fe087b2ff6b7d8b4c984512294 not found: ID does not exist" containerID="93e4c55f9177b116015994dd22fa2a6c729c45fe087b2ff6b7d8b4c984512294" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.751868 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93e4c55f9177b116015994dd22fa2a6c729c45fe087b2ff6b7d8b4c984512294"} err="failed to get container status \"93e4c55f9177b116015994dd22fa2a6c729c45fe087b2ff6b7d8b4c984512294\": rpc error: code = NotFound desc = could not find container \"93e4c55f9177b116015994dd22fa2a6c729c45fe087b2ff6b7d8b4c984512294\": container with ID starting with 93e4c55f9177b116015994dd22fa2a6c729c45fe087b2ff6b7d8b4c984512294 not found: ID does not exist" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.915941 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 05 23:18:07 crc kubenswrapper[4910]: E0105 23:18:07.916558 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9" 
containerName="init" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.916588 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9" containerName="init" Jan 05 23:18:07 crc kubenswrapper[4910]: E0105 23:18:07.916603 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9" containerName="dnsmasq-dns" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.916616 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9" containerName="dnsmasq-dns" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.917006 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9" containerName="dnsmasq-dns" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.926618 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.926742 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.930184 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.930350 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 05 23:18:07 crc kubenswrapper[4910]: I0105 23:18:07.931267 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-mv7kv" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.028255 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/12828c14-528f-4cf9-823b-acb71c5a4332-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.028708 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12828c14-528f-4cf9-823b-acb71c5a4332-scripts\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.028784 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12828c14-528f-4cf9-823b-acb71c5a4332-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.028819 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12828c14-528f-4cf9-823b-acb71c5a4332-config\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.028883 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pc4r8\" (UniqueName: \"kubernetes.io/projected/12828c14-528f-4cf9-823b-acb71c5a4332-kube-api-access-pc4r8\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.131271 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12828c14-528f-4cf9-823b-acb71c5a4332-scripts\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.131698 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12828c14-528f-4cf9-823b-acb71c5a4332-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.131870 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12828c14-528f-4cf9-823b-acb71c5a4332-config\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.132074 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pc4r8\" (UniqueName: \"kubernetes.io/projected/12828c14-528f-4cf9-823b-acb71c5a4332-kube-api-access-pc4r8\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.132246 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12828c14-528f-4cf9-823b-acb71c5a4332-scripts\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.132366 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/12828c14-528f-4cf9-823b-acb71c5a4332-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.132738 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/12828c14-528f-4cf9-823b-acb71c5a4332-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.132900 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12828c14-528f-4cf9-823b-acb71c5a4332-config\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.138578 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12828c14-528f-4cf9-823b-acb71c5a4332-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.154383 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pc4r8\" (UniqueName: \"kubernetes.io/projected/12828c14-528f-4cf9-823b-acb71c5a4332-kube-api-access-pc4r8\") pod \"ovn-northd-0\" (UID: \"12828c14-528f-4cf9-823b-acb71c5a4332\") " pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.249227 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.742880 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9" path="/var/lib/kubelet/pods/8c0b90d1-eaaa-4555-898a-5c8abbb6d9a9/volumes" Jan 05 23:18:08 crc kubenswrapper[4910]: I0105 23:18:08.790308 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 05 23:18:08 crc kubenswrapper[4910]: W0105 23:18:08.795009 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12828c14_528f_4cf9_823b_acb71c5a4332.slice/crio-cfc74b66ce20d9a2ed69a977963ac84e7bef2436b7dece4b98ee5a49ccd3009c WatchSource:0}: Error finding container cfc74b66ce20d9a2ed69a977963ac84e7bef2436b7dece4b98ee5a49ccd3009c: Status 404 returned error can't find the container with id cfc74b66ce20d9a2ed69a977963ac84e7bef2436b7dece4b98ee5a49ccd3009c Jan 05 23:18:09 crc kubenswrapper[4910]: I0105 23:18:09.669190 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"12828c14-528f-4cf9-823b-acb71c5a4332","Type":"ContainerStarted","Data":"84a2744a65ae233bfd563708601f105b0b218e0283762e1d08f0120efa749451"} Jan 05 23:18:09 crc kubenswrapper[4910]: I0105 23:18:09.670041 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"12828c14-528f-4cf9-823b-acb71c5a4332","Type":"ContainerStarted","Data":"358e010746aff188bee9d8d4b8b1ab102763bde3dd654af89dc05345d5f581f8"} Jan 05 23:18:09 crc kubenswrapper[4910]: I0105 23:18:09.670066 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 05 23:18:09 crc kubenswrapper[4910]: I0105 23:18:09.670081 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"12828c14-528f-4cf9-823b-acb71c5a4332","Type":"ContainerStarted","Data":"cfc74b66ce20d9a2ed69a977963ac84e7bef2436b7dece4b98ee5a49ccd3009c"} Jan 05 23:18:09 crc kubenswrapper[4910]: I0105 23:18:09.704941 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.704902639 podStartE2EDuration="2.704902639s" podCreationTimestamp="2026-01-05 23:18:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:18:09.694922792 +0000 UTC m=+5221.272420482" watchObservedRunningTime="2026-01-05 23:18:09.704902639 +0000 UTC m=+5221.282400309" Jan 05 23:18:10 crc kubenswrapper[4910]: I0105 23:18:10.954393 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:18:10 crc kubenswrapper[4910]: I0105 23:18:10.954455 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:18:13 crc kubenswrapper[4910]: I0105 23:18:13.773509 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-zvqpd"] Jan 05 23:18:13 crc kubenswrapper[4910]: 
I0105 23:18:13.781811 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-zvqpd" Jan 05 23:18:13 crc kubenswrapper[4910]: I0105 23:18:13.798988 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-zvqpd"] Jan 05 23:18:13 crc kubenswrapper[4910]: I0105 23:18:13.861566 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-ddd0-account-create-update-tvcfw"] Jan 05 23:18:13 crc kubenswrapper[4910]: I0105 23:18:13.862919 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ddd0-account-create-update-tvcfw" Jan 05 23:18:13 crc kubenswrapper[4910]: I0105 23:18:13.865884 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 05 23:18:13 crc kubenswrapper[4910]: I0105 23:18:13.870533 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-ddd0-account-create-update-tvcfw"] Jan 05 23:18:13 crc kubenswrapper[4910]: I0105 23:18:13.961370 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c91f15-103b-471f-8a4b-d6bb07712a59-operator-scripts\") pod \"keystone-ddd0-account-create-update-tvcfw\" (UID: \"e2c91f15-103b-471f-8a4b-d6bb07712a59\") " pod="openstack/keystone-ddd0-account-create-update-tvcfw" Jan 05 23:18:13 crc kubenswrapper[4910]: I0105 23:18:13.961681 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/764cc26a-724f-470e-abf1-5b1a2339da16-operator-scripts\") pod \"keystone-db-create-zvqpd\" (UID: \"764cc26a-724f-470e-abf1-5b1a2339da16\") " pod="openstack/keystone-db-create-zvqpd" Jan 05 23:18:13 crc kubenswrapper[4910]: I0105 23:18:13.961882 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xn9s\" (UniqueName: \"kubernetes.io/projected/e2c91f15-103b-471f-8a4b-d6bb07712a59-kube-api-access-9xn9s\") pod \"keystone-ddd0-account-create-update-tvcfw\" (UID: \"e2c91f15-103b-471f-8a4b-d6bb07712a59\") " pod="openstack/keystone-ddd0-account-create-update-tvcfw" Jan 05 23:18:13 crc kubenswrapper[4910]: I0105 23:18:13.962067 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xx757\" (UniqueName: \"kubernetes.io/projected/764cc26a-724f-470e-abf1-5b1a2339da16-kube-api-access-xx757\") pod \"keystone-db-create-zvqpd\" (UID: \"764cc26a-724f-470e-abf1-5b1a2339da16\") " pod="openstack/keystone-db-create-zvqpd" Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.063993 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xn9s\" (UniqueName: \"kubernetes.io/projected/e2c91f15-103b-471f-8a4b-d6bb07712a59-kube-api-access-9xn9s\") pod \"keystone-ddd0-account-create-update-tvcfw\" (UID: \"e2c91f15-103b-471f-8a4b-d6bb07712a59\") " pod="openstack/keystone-ddd0-account-create-update-tvcfw" Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.064183 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xx757\" (UniqueName: \"kubernetes.io/projected/764cc26a-724f-470e-abf1-5b1a2339da16-kube-api-access-xx757\") pod \"keystone-db-create-zvqpd\" (UID: \"764cc26a-724f-470e-abf1-5b1a2339da16\") " pod="openstack/keystone-db-create-zvqpd" Jan 05 23:18:14 
crc kubenswrapper[4910]: I0105 23:18:14.064266 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c91f15-103b-471f-8a4b-d6bb07712a59-operator-scripts\") pod \"keystone-ddd0-account-create-update-tvcfw\" (UID: \"e2c91f15-103b-471f-8a4b-d6bb07712a59\") " pod="openstack/keystone-ddd0-account-create-update-tvcfw" Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.064316 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/764cc26a-724f-470e-abf1-5b1a2339da16-operator-scripts\") pod \"keystone-db-create-zvqpd\" (UID: \"764cc26a-724f-470e-abf1-5b1a2339da16\") " pod="openstack/keystone-db-create-zvqpd" Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.064995 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c91f15-103b-471f-8a4b-d6bb07712a59-operator-scripts\") pod \"keystone-ddd0-account-create-update-tvcfw\" (UID: \"e2c91f15-103b-471f-8a4b-d6bb07712a59\") " pod="openstack/keystone-ddd0-account-create-update-tvcfw" Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.065689 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/764cc26a-724f-470e-abf1-5b1a2339da16-operator-scripts\") pod \"keystone-db-create-zvqpd\" (UID: \"764cc26a-724f-470e-abf1-5b1a2339da16\") " pod="openstack/keystone-db-create-zvqpd" Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.083728 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xx757\" (UniqueName: \"kubernetes.io/projected/764cc26a-724f-470e-abf1-5b1a2339da16-kube-api-access-xx757\") pod \"keystone-db-create-zvqpd\" (UID: \"764cc26a-724f-470e-abf1-5b1a2339da16\") " pod="openstack/keystone-db-create-zvqpd" Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.086449 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xn9s\" (UniqueName: \"kubernetes.io/projected/e2c91f15-103b-471f-8a4b-d6bb07712a59-kube-api-access-9xn9s\") pod \"keystone-ddd0-account-create-update-tvcfw\" (UID: \"e2c91f15-103b-471f-8a4b-d6bb07712a59\") " pod="openstack/keystone-ddd0-account-create-update-tvcfw" Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.103313 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-zvqpd" Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.181646 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-ddd0-account-create-update-tvcfw" Jan 05 23:18:14 crc kubenswrapper[4910]: W0105 23:18:14.634502 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod764cc26a_724f_470e_abf1_5b1a2339da16.slice/crio-9f036efc92263c7d350d06d4729424e2e7d0fe75faf37cf939116dedbab745e1 WatchSource:0}: Error finding container 9f036efc92263c7d350d06d4729424e2e7d0fe75faf37cf939116dedbab745e1: Status 404 returned error can't find the container with id 9f036efc92263c7d350d06d4729424e2e7d0fe75faf37cf939116dedbab745e1 Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.634729 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-zvqpd"] Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.761177 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-ddd0-account-create-update-tvcfw"] Jan 05 23:18:14 crc kubenswrapper[4910]: W0105 23:18:14.769032 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode2c91f15_103b_471f_8a4b_d6bb07712a59.slice/crio-2096f9617a0c8a760417052e50aaf93fd91aad8dd26f5329bc5f16d6b6cdcecb WatchSource:0}: Error finding container 2096f9617a0c8a760417052e50aaf93fd91aad8dd26f5329bc5f16d6b6cdcecb: Status 404 returned error can't find the container with id 2096f9617a0c8a760417052e50aaf93fd91aad8dd26f5329bc5f16d6b6cdcecb Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.996357 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ddd0-account-create-update-tvcfw" event={"ID":"e2c91f15-103b-471f-8a4b-d6bb07712a59","Type":"ContainerStarted","Data":"a9b37fdb4f8017e42ee2858351a369f458880c81d4608d761ec1b89f9d314b4b"} Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.996433 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ddd0-account-create-update-tvcfw" event={"ID":"e2c91f15-103b-471f-8a4b-d6bb07712a59","Type":"ContainerStarted","Data":"2096f9617a0c8a760417052e50aaf93fd91aad8dd26f5329bc5f16d6b6cdcecb"} Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.998321 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-zvqpd" event={"ID":"764cc26a-724f-470e-abf1-5b1a2339da16","Type":"ContainerStarted","Data":"df2d865949d3681db76f318e2b3ba32ad48a3b8b2f8946e10aaf2dcd443ce42c"} Jan 05 23:18:14 crc kubenswrapper[4910]: I0105 23:18:14.998390 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-zvqpd" event={"ID":"764cc26a-724f-470e-abf1-5b1a2339da16","Type":"ContainerStarted","Data":"9f036efc92263c7d350d06d4729424e2e7d0fe75faf37cf939116dedbab745e1"} Jan 05 23:18:15 crc kubenswrapper[4910]: I0105 23:18:15.017067 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-ddd0-account-create-update-tvcfw" podStartSLOduration=2.017046348 podStartE2EDuration="2.017046348s" podCreationTimestamp="2026-01-05 23:18:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:18:15.010853064 +0000 UTC m=+5226.588350734" watchObservedRunningTime="2026-01-05 23:18:15.017046348 +0000 UTC m=+5226.594544018" Jan 05 23:18:15 crc kubenswrapper[4910]: I0105 23:18:15.039509 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-zvqpd" 
podStartSLOduration=2.039481514 podStartE2EDuration="2.039481514s" podCreationTimestamp="2026-01-05 23:18:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:18:15.025674872 +0000 UTC m=+5226.603172542" watchObservedRunningTime="2026-01-05 23:18:15.039481514 +0000 UTC m=+5226.616979194" Jan 05 23:18:16 crc kubenswrapper[4910]: I0105 23:18:16.017427 4910 generic.go:334] "Generic (PLEG): container finished" podID="764cc26a-724f-470e-abf1-5b1a2339da16" containerID="df2d865949d3681db76f318e2b3ba32ad48a3b8b2f8946e10aaf2dcd443ce42c" exitCode=0 Jan 05 23:18:16 crc kubenswrapper[4910]: I0105 23:18:16.017531 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-zvqpd" event={"ID":"764cc26a-724f-470e-abf1-5b1a2339da16","Type":"ContainerDied","Data":"df2d865949d3681db76f318e2b3ba32ad48a3b8b2f8946e10aaf2dcd443ce42c"} Jan 05 23:18:16 crc kubenswrapper[4910]: I0105 23:18:16.020485 4910 generic.go:334] "Generic (PLEG): container finished" podID="e2c91f15-103b-471f-8a4b-d6bb07712a59" containerID="a9b37fdb4f8017e42ee2858351a369f458880c81d4608d761ec1b89f9d314b4b" exitCode=0 Jan 05 23:18:16 crc kubenswrapper[4910]: I0105 23:18:16.020570 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ddd0-account-create-update-tvcfw" event={"ID":"e2c91f15-103b-471f-8a4b-d6bb07712a59","Type":"ContainerDied","Data":"a9b37fdb4f8017e42ee2858351a369f458880c81d4608d761ec1b89f9d314b4b"} Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.582954 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ddd0-account-create-update-tvcfw" Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.589974 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-zvqpd" Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.743703 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xn9s\" (UniqueName: \"kubernetes.io/projected/e2c91f15-103b-471f-8a4b-d6bb07712a59-kube-api-access-9xn9s\") pod \"e2c91f15-103b-471f-8a4b-d6bb07712a59\" (UID: \"e2c91f15-103b-471f-8a4b-d6bb07712a59\") " Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.743816 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xx757\" (UniqueName: \"kubernetes.io/projected/764cc26a-724f-470e-abf1-5b1a2339da16-kube-api-access-xx757\") pod \"764cc26a-724f-470e-abf1-5b1a2339da16\" (UID: \"764cc26a-724f-470e-abf1-5b1a2339da16\") " Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.743877 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/764cc26a-724f-470e-abf1-5b1a2339da16-operator-scripts\") pod \"764cc26a-724f-470e-abf1-5b1a2339da16\" (UID: \"764cc26a-724f-470e-abf1-5b1a2339da16\") " Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.744107 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c91f15-103b-471f-8a4b-d6bb07712a59-operator-scripts\") pod \"e2c91f15-103b-471f-8a4b-d6bb07712a59\" (UID: \"e2c91f15-103b-471f-8a4b-d6bb07712a59\") " Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.745399 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/764cc26a-724f-470e-abf1-5b1a2339da16-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "764cc26a-724f-470e-abf1-5b1a2339da16" (UID: "764cc26a-724f-470e-abf1-5b1a2339da16"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.745486 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2c91f15-103b-471f-8a4b-d6bb07712a59-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e2c91f15-103b-471f-8a4b-d6bb07712a59" (UID: "e2c91f15-103b-471f-8a4b-d6bb07712a59"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.759558 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2c91f15-103b-471f-8a4b-d6bb07712a59-kube-api-access-9xn9s" (OuterVolumeSpecName: "kube-api-access-9xn9s") pod "e2c91f15-103b-471f-8a4b-d6bb07712a59" (UID: "e2c91f15-103b-471f-8a4b-d6bb07712a59"). InnerVolumeSpecName "kube-api-access-9xn9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.759617 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/764cc26a-724f-470e-abf1-5b1a2339da16-kube-api-access-xx757" (OuterVolumeSpecName: "kube-api-access-xx757") pod "764cc26a-724f-470e-abf1-5b1a2339da16" (UID: "764cc26a-724f-470e-abf1-5b1a2339da16"). InnerVolumeSpecName "kube-api-access-xx757". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.847675 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xn9s\" (UniqueName: \"kubernetes.io/projected/e2c91f15-103b-471f-8a4b-d6bb07712a59-kube-api-access-9xn9s\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.848274 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xx757\" (UniqueName: \"kubernetes.io/projected/764cc26a-724f-470e-abf1-5b1a2339da16-kube-api-access-xx757\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.848294 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/764cc26a-724f-470e-abf1-5b1a2339da16-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:17 crc kubenswrapper[4910]: I0105 23:18:17.848315 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2c91f15-103b-471f-8a4b-d6bb07712a59-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:18 crc kubenswrapper[4910]: I0105 23:18:18.051245 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ddd0-account-create-update-tvcfw" Jan 05 23:18:18 crc kubenswrapper[4910]: I0105 23:18:18.053963 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ddd0-account-create-update-tvcfw" event={"ID":"e2c91f15-103b-471f-8a4b-d6bb07712a59","Type":"ContainerDied","Data":"2096f9617a0c8a760417052e50aaf93fd91aad8dd26f5329bc5f16d6b6cdcecb"} Jan 05 23:18:18 crc kubenswrapper[4910]: I0105 23:18:18.054045 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2096f9617a0c8a760417052e50aaf93fd91aad8dd26f5329bc5f16d6b6cdcecb" Jan 05 23:18:18 crc kubenswrapper[4910]: I0105 23:18:18.061981 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-zvqpd" event={"ID":"764cc26a-724f-470e-abf1-5b1a2339da16","Type":"ContainerDied","Data":"9f036efc92263c7d350d06d4729424e2e7d0fe75faf37cf939116dedbab745e1"} Jan 05 23:18:18 crc kubenswrapper[4910]: I0105 23:18:18.062051 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f036efc92263c7d350d06d4729424e2e7d0fe75faf37cf939116dedbab745e1" Jan 05 23:18:18 crc kubenswrapper[4910]: I0105 23:18:18.062228 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-zvqpd" Jan 05 23:18:18 crc kubenswrapper[4910]: E0105 23:18:18.259003 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode2c91f15_103b_471f_8a4b_d6bb07712a59.slice\": RecentStats: unable to find data in memory cache]" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.375672 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-mfj9l"] Jan 05 23:18:19 crc kubenswrapper[4910]: E0105 23:18:19.376069 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="764cc26a-724f-470e-abf1-5b1a2339da16" containerName="mariadb-database-create" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.376083 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="764cc26a-724f-470e-abf1-5b1a2339da16" containerName="mariadb-database-create" Jan 05 23:18:19 crc kubenswrapper[4910]: E0105 23:18:19.376105 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2c91f15-103b-471f-8a4b-d6bb07712a59" containerName="mariadb-account-create-update" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.376113 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2c91f15-103b-471f-8a4b-d6bb07712a59" containerName="mariadb-account-create-update" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.376277 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="764cc26a-724f-470e-abf1-5b1a2339da16" containerName="mariadb-database-create" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.376291 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2c91f15-103b-471f-8a4b-d6bb07712a59" containerName="mariadb-account-create-update" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.376871 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-mfj9l" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.379349 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.379454 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.382986 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.384197 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hll75" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.389055 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-mfj9l"] Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.484994 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-combined-ca-bundle\") pod \"keystone-db-sync-mfj9l\" (UID: \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\") " pod="openstack/keystone-db-sync-mfj9l" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.485110 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmwcn\" (UniqueName: \"kubernetes.io/projected/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-kube-api-access-mmwcn\") pod \"keystone-db-sync-mfj9l\" (UID: \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\") " pod="openstack/keystone-db-sync-mfj9l" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.485200 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-config-data\") pod \"keystone-db-sync-mfj9l\" (UID: \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\") " pod="openstack/keystone-db-sync-mfj9l" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.588203 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmwcn\" (UniqueName: \"kubernetes.io/projected/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-kube-api-access-mmwcn\") pod \"keystone-db-sync-mfj9l\" (UID: \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\") " pod="openstack/keystone-db-sync-mfj9l" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.588269 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-config-data\") pod \"keystone-db-sync-mfj9l\" (UID: \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\") " pod="openstack/keystone-db-sync-mfj9l" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.588342 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-combined-ca-bundle\") pod \"keystone-db-sync-mfj9l\" (UID: \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\") " pod="openstack/keystone-db-sync-mfj9l" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.597043 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-config-data\") pod \"keystone-db-sync-mfj9l\" (UID: \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\") " 
pod="openstack/keystone-db-sync-mfj9l" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.600830 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-combined-ca-bundle\") pod \"keystone-db-sync-mfj9l\" (UID: \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\") " pod="openstack/keystone-db-sync-mfj9l" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.609642 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmwcn\" (UniqueName: \"kubernetes.io/projected/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-kube-api-access-mmwcn\") pod \"keystone-db-sync-mfj9l\" (UID: \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\") " pod="openstack/keystone-db-sync-mfj9l" Jan 05 23:18:19 crc kubenswrapper[4910]: I0105 23:18:19.702509 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-mfj9l" Jan 05 23:18:20 crc kubenswrapper[4910]: I0105 23:18:20.191694 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-mfj9l"] Jan 05 23:18:21 crc kubenswrapper[4910]: I0105 23:18:21.099035 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-mfj9l" event={"ID":"a2a5de11-93f1-4057-bd1e-f5752a9ffc19","Type":"ContainerStarted","Data":"1e420b4243fd90e6496d0d09bf20ff32be3ff121b6f6d92898005bc2d4b4d096"} Jan 05 23:18:21 crc kubenswrapper[4910]: I0105 23:18:21.099503 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-mfj9l" event={"ID":"a2a5de11-93f1-4057-bd1e-f5752a9ffc19","Type":"ContainerStarted","Data":"eb9454f84f97a1c0d550e4cbf2ee7464ded65883e727db4a616ffd2f60fcf294"} Jan 05 23:18:21 crc kubenswrapper[4910]: I0105 23:18:21.136804 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-mfj9l" podStartSLOduration=2.136776628 podStartE2EDuration="2.136776628s" podCreationTimestamp="2026-01-05 23:18:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:18:21.128590596 +0000 UTC m=+5232.706088316" watchObservedRunningTime="2026-01-05 23:18:21.136776628 +0000 UTC m=+5232.714274308" Jan 05 23:18:22 crc kubenswrapper[4910]: I0105 23:18:22.116008 4910 generic.go:334] "Generic (PLEG): container finished" podID="a2a5de11-93f1-4057-bd1e-f5752a9ffc19" containerID="1e420b4243fd90e6496d0d09bf20ff32be3ff121b6f6d92898005bc2d4b4d096" exitCode=0 Jan 05 23:18:22 crc kubenswrapper[4910]: I0105 23:18:22.116082 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-mfj9l" event={"ID":"a2a5de11-93f1-4057-bd1e-f5752a9ffc19","Type":"ContainerDied","Data":"1e420b4243fd90e6496d0d09bf20ff32be3ff121b6f6d92898005bc2d4b4d096"} Jan 05 23:18:23 crc kubenswrapper[4910]: I0105 23:18:23.354876 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 05 23:18:23 crc kubenswrapper[4910]: I0105 23:18:23.576691 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-mfj9l" Jan 05 23:18:23 crc kubenswrapper[4910]: I0105 23:18:23.677923 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-combined-ca-bundle\") pod \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\" (UID: \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\") " Jan 05 23:18:23 crc kubenswrapper[4910]: I0105 23:18:23.678487 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmwcn\" (UniqueName: \"kubernetes.io/projected/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-kube-api-access-mmwcn\") pod \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\" (UID: \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\") " Jan 05 23:18:23 crc kubenswrapper[4910]: I0105 23:18:23.678551 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-config-data\") pod \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\" (UID: \"a2a5de11-93f1-4057-bd1e-f5752a9ffc19\") " Jan 05 23:18:23 crc kubenswrapper[4910]: I0105 23:18:23.686426 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-kube-api-access-mmwcn" (OuterVolumeSpecName: "kube-api-access-mmwcn") pod "a2a5de11-93f1-4057-bd1e-f5752a9ffc19" (UID: "a2a5de11-93f1-4057-bd1e-f5752a9ffc19"). InnerVolumeSpecName "kube-api-access-mmwcn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:18:23 crc kubenswrapper[4910]: I0105 23:18:23.715306 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a2a5de11-93f1-4057-bd1e-f5752a9ffc19" (UID: "a2a5de11-93f1-4057-bd1e-f5752a9ffc19"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:18:23 crc kubenswrapper[4910]: I0105 23:18:23.744441 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-config-data" (OuterVolumeSpecName: "config-data") pod "a2a5de11-93f1-4057-bd1e-f5752a9ffc19" (UID: "a2a5de11-93f1-4057-bd1e-f5752a9ffc19"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:18:23 crc kubenswrapper[4910]: I0105 23:18:23.780768 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:23 crc kubenswrapper[4910]: I0105 23:18:23.780809 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmwcn\" (UniqueName: \"kubernetes.io/projected/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-kube-api-access-mmwcn\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:23 crc kubenswrapper[4910]: I0105 23:18:23.780826 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2a5de11-93f1-4057-bd1e-f5752a9ffc19-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.138593 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-mfj9l" event={"ID":"a2a5de11-93f1-4057-bd1e-f5752a9ffc19","Type":"ContainerDied","Data":"eb9454f84f97a1c0d550e4cbf2ee7464ded65883e727db4a616ffd2f60fcf294"} Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.138641 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb9454f84f97a1c0d550e4cbf2ee7464ded65883e727db4a616ffd2f60fcf294" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.138698 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-mfj9l" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.430431 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5cc49cdcf-q9cm4"] Jan 05 23:18:24 crc kubenswrapper[4910]: E0105 23:18:24.430762 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2a5de11-93f1-4057-bd1e-f5752a9ffc19" containerName="keystone-db-sync" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.430775 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2a5de11-93f1-4057-bd1e-f5752a9ffc19" containerName="keystone-db-sync" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.430948 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2a5de11-93f1-4057-bd1e-f5752a9ffc19" containerName="keystone-db-sync" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.431792 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.449318 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cc49cdcf-q9cm4"] Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.464835 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-qhc8q"] Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.466097 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.471158 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.471267 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.472675 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hll75" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.472848 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.472974 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.504169 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qhc8q"] Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.595835 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-combined-ca-bundle\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.595882 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.595912 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-credential-keys\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.596090 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgjkf\" (UniqueName: \"kubernetes.io/projected/a70442a9-ce4d-4188-b14c-58c4dd655495-kube-api-access-lgjkf\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.596254 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-config\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.596390 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hst5\" (UniqueName: \"kubernetes.io/projected/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-kube-api-access-7hst5\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.596424 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-config-data\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.596508 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.596554 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-scripts\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.596781 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-dns-svc\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.597038 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-fernet-keys\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.698886 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-fernet-keys\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.699019 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-combined-ca-bundle\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.699041 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.699077 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-credential-keys\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.699108 4910 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-lgjkf\" (UniqueName: \"kubernetes.io/projected/a70442a9-ce4d-4188-b14c-58c4dd655495-kube-api-access-lgjkf\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.699156 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-config\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.699191 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hst5\" (UniqueName: \"kubernetes.io/projected/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-kube-api-access-7hst5\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.699212 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-config-data\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.699239 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.699259 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-scripts\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.699279 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-dns-svc\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.700290 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.700304 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.700808 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-dns-svc\") pod 
\"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.704766 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-config\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.707366 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-combined-ca-bundle\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.707465 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-config-data\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.707576 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-scripts\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.712915 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-fernet-keys\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.713939 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-credential-keys\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.716962 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hst5\" (UniqueName: \"kubernetes.io/projected/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-kube-api-access-7hst5\") pod \"dnsmasq-dns-5cc49cdcf-q9cm4\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.719826 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgjkf\" (UniqueName: \"kubernetes.io/projected/a70442a9-ce4d-4188-b14c-58c4dd655495-kube-api-access-lgjkf\") pod \"keystone-bootstrap-qhc8q\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.753866 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:24 crc kubenswrapper[4910]: I0105 23:18:24.787399 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:25 crc kubenswrapper[4910]: W0105 23:18:25.255237 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1de3ba7c_4d47_4dec_bddb_d6f6da05f871.slice/crio-7fb1c418cf94deabd7f886a672d6255444a0d932c814f8b0cc9b5d58d1500c4c WatchSource:0}: Error finding container 7fb1c418cf94deabd7f886a672d6255444a0d932c814f8b0cc9b5d58d1500c4c: Status 404 returned error can't find the container with id 7fb1c418cf94deabd7f886a672d6255444a0d932c814f8b0cc9b5d58d1500c4c Jan 05 23:18:25 crc kubenswrapper[4910]: I0105 23:18:25.257503 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cc49cdcf-q9cm4"] Jan 05 23:18:25 crc kubenswrapper[4910]: W0105 23:18:25.333553 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda70442a9_ce4d_4188_b14c_58c4dd655495.slice/crio-ff335ad4cf3d6837c847222236b86797f52a5c8da3d26b244f6ca91a9f557792 WatchSource:0}: Error finding container ff335ad4cf3d6837c847222236b86797f52a5c8da3d26b244f6ca91a9f557792: Status 404 returned error can't find the container with id ff335ad4cf3d6837c847222236b86797f52a5c8da3d26b244f6ca91a9f557792 Jan 05 23:18:25 crc kubenswrapper[4910]: I0105 23:18:25.337527 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qhc8q"] Jan 05 23:18:26 crc kubenswrapper[4910]: I0105 23:18:26.161286 4910 generic.go:334] "Generic (PLEG): container finished" podID="1de3ba7c-4d47-4dec-bddb-d6f6da05f871" containerID="4d138ace8cf7535bc6bd25cc8054ba26fa479e739f9bac4d6f29589ff4b77714" exitCode=0 Jan 05 23:18:26 crc kubenswrapper[4910]: I0105 23:18:26.162844 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" event={"ID":"1de3ba7c-4d47-4dec-bddb-d6f6da05f871","Type":"ContainerDied","Data":"4d138ace8cf7535bc6bd25cc8054ba26fa479e739f9bac4d6f29589ff4b77714"} Jan 05 23:18:26 crc kubenswrapper[4910]: I0105 23:18:26.162875 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" event={"ID":"1de3ba7c-4d47-4dec-bddb-d6f6da05f871","Type":"ContainerStarted","Data":"7fb1c418cf94deabd7f886a672d6255444a0d932c814f8b0cc9b5d58d1500c4c"} Jan 05 23:18:26 crc kubenswrapper[4910]: I0105 23:18:26.168998 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qhc8q" event={"ID":"a70442a9-ce4d-4188-b14c-58c4dd655495","Type":"ContainerStarted","Data":"3b2ed3a23f288bc632fd143388dd9ed43057aa911b20659d96f37951f73197e0"} Jan 05 23:18:26 crc kubenswrapper[4910]: I0105 23:18:26.169054 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qhc8q" event={"ID":"a70442a9-ce4d-4188-b14c-58c4dd655495","Type":"ContainerStarted","Data":"ff335ad4cf3d6837c847222236b86797f52a5c8da3d26b244f6ca91a9f557792"} Jan 05 23:18:26 crc kubenswrapper[4910]: I0105 23:18:26.228633 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-qhc8q" podStartSLOduration=2.228601911 podStartE2EDuration="2.228601911s" podCreationTimestamp="2026-01-05 23:18:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:18:26.217770533 +0000 UTC m=+5237.795268203" watchObservedRunningTime="2026-01-05 23:18:26.228601911 +0000 UTC m=+5237.806099611" Jan 05 
23:18:27 crc kubenswrapper[4910]: I0105 23:18:27.181247 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" event={"ID":"1de3ba7c-4d47-4dec-bddb-d6f6da05f871","Type":"ContainerStarted","Data":"cc1bb5fac787877bf70da3287ece341624bea5fed757fd009beec1e11dc80a79"} Jan 05 23:18:27 crc kubenswrapper[4910]: I0105 23:18:27.218383 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" podStartSLOduration=3.218344265 podStartE2EDuration="3.218344265s" podCreationTimestamp="2026-01-05 23:18:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:18:27.201036447 +0000 UTC m=+5238.778534127" watchObservedRunningTime="2026-01-05 23:18:27.218344265 +0000 UTC m=+5238.795841975" Jan 05 23:18:28 crc kubenswrapper[4910]: I0105 23:18:28.191669 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:29 crc kubenswrapper[4910]: I0105 23:18:29.205732 4910 generic.go:334] "Generic (PLEG): container finished" podID="a70442a9-ce4d-4188-b14c-58c4dd655495" containerID="3b2ed3a23f288bc632fd143388dd9ed43057aa911b20659d96f37951f73197e0" exitCode=0 Jan 05 23:18:29 crc kubenswrapper[4910]: I0105 23:18:29.205842 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qhc8q" event={"ID":"a70442a9-ce4d-4188-b14c-58c4dd655495","Type":"ContainerDied","Data":"3b2ed3a23f288bc632fd143388dd9ed43057aa911b20659d96f37951f73197e0"} Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.712523 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.823829 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lgjkf\" (UniqueName: \"kubernetes.io/projected/a70442a9-ce4d-4188-b14c-58c4dd655495-kube-api-access-lgjkf\") pod \"a70442a9-ce4d-4188-b14c-58c4dd655495\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.823947 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-fernet-keys\") pod \"a70442a9-ce4d-4188-b14c-58c4dd655495\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.824012 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-combined-ca-bundle\") pod \"a70442a9-ce4d-4188-b14c-58c4dd655495\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.824154 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-scripts\") pod \"a70442a9-ce4d-4188-b14c-58c4dd655495\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.824236 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-credential-keys\") pod \"a70442a9-ce4d-4188-b14c-58c4dd655495\" (UID: 
\"a70442a9-ce4d-4188-b14c-58c4dd655495\") " Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.824276 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-config-data\") pod \"a70442a9-ce4d-4188-b14c-58c4dd655495\" (UID: \"a70442a9-ce4d-4188-b14c-58c4dd655495\") " Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.832043 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "a70442a9-ce4d-4188-b14c-58c4dd655495" (UID: "a70442a9-ce4d-4188-b14c-58c4dd655495"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.832647 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "a70442a9-ce4d-4188-b14c-58c4dd655495" (UID: "a70442a9-ce4d-4188-b14c-58c4dd655495"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.832727 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-scripts" (OuterVolumeSpecName: "scripts") pod "a70442a9-ce4d-4188-b14c-58c4dd655495" (UID: "a70442a9-ce4d-4188-b14c-58c4dd655495"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.834662 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a70442a9-ce4d-4188-b14c-58c4dd655495-kube-api-access-lgjkf" (OuterVolumeSpecName: "kube-api-access-lgjkf") pod "a70442a9-ce4d-4188-b14c-58c4dd655495" (UID: "a70442a9-ce4d-4188-b14c-58c4dd655495"). InnerVolumeSpecName "kube-api-access-lgjkf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.866093 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a70442a9-ce4d-4188-b14c-58c4dd655495" (UID: "a70442a9-ce4d-4188-b14c-58c4dd655495"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.868144 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-config-data" (OuterVolumeSpecName: "config-data") pod "a70442a9-ce4d-4188-b14c-58c4dd655495" (UID: "a70442a9-ce4d-4188-b14c-58c4dd655495"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.927316 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lgjkf\" (UniqueName: \"kubernetes.io/projected/a70442a9-ce4d-4188-b14c-58c4dd655495-kube-api-access-lgjkf\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.927399 4910 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.927429 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.927454 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.927479 4910 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:30 crc kubenswrapper[4910]: I0105 23:18:30.927501 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a70442a9-ce4d-4188-b14c-58c4dd655495-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.234676 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qhc8q" event={"ID":"a70442a9-ce4d-4188-b14c-58c4dd655495","Type":"ContainerDied","Data":"ff335ad4cf3d6837c847222236b86797f52a5c8da3d26b244f6ca91a9f557792"} Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.234748 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff335ad4cf3d6837c847222236b86797f52a5c8da3d26b244f6ca91a9f557792" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.234873 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-qhc8q" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.359941 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-qhc8q"] Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.377891 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-qhc8q"] Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.446374 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-2527p"] Jan 05 23:18:31 crc kubenswrapper[4910]: E0105 23:18:31.446773 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a70442a9-ce4d-4188-b14c-58c4dd655495" containerName="keystone-bootstrap" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.446789 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a70442a9-ce4d-4188-b14c-58c4dd655495" containerName="keystone-bootstrap" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.447049 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a70442a9-ce4d-4188-b14c-58c4dd655495" containerName="keystone-bootstrap" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.447800 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.450795 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.451873 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.451941 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hll75" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.451962 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.456682 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.478618 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-2527p"] Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.539261 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-scripts\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.539306 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-fernet-keys\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.539367 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-credential-keys\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.539453 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmfzm\" (UniqueName: \"kubernetes.io/projected/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-kube-api-access-nmfzm\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.539477 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-combined-ca-bundle\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.539493 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-config-data\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.640937 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-credential-keys\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.641550 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmfzm\" (UniqueName: \"kubernetes.io/projected/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-kube-api-access-nmfzm\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.641579 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-combined-ca-bundle\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.641606 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-config-data\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.641633 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-scripts\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.641653 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-fernet-keys\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.648451 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"credential-keys\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-credential-keys\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.649756 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-combined-ca-bundle\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.650221 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-scripts\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.650904 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-config-data\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.653344 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-fernet-keys\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.665544 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmfzm\" (UniqueName: \"kubernetes.io/projected/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-kube-api-access-nmfzm\") pod \"keystone-bootstrap-2527p\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:31 crc kubenswrapper[4910]: I0105 23:18:31.809592 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:32 crc kubenswrapper[4910]: I0105 23:18:32.291731 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-2527p"] Jan 05 23:18:32 crc kubenswrapper[4910]: I0105 23:18:32.733651 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a70442a9-ce4d-4188-b14c-58c4dd655495" path="/var/lib/kubelet/pods/a70442a9-ce4d-4188-b14c-58c4dd655495/volumes" Jan 05 23:18:33 crc kubenswrapper[4910]: I0105 23:18:33.260201 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2527p" event={"ID":"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf","Type":"ContainerStarted","Data":"c92fb655cbf1202d690e79f6136e212ae5edb49b92b2c2ddbcef8ff18b8f3fc4"} Jan 05 23:18:33 crc kubenswrapper[4910]: I0105 23:18:33.262549 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2527p" event={"ID":"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf","Type":"ContainerStarted","Data":"d35d875f892ef917b3db0e7b25c707cee6698d6426bcbec5e73f441975648c36"} Jan 05 23:18:33 crc kubenswrapper[4910]: I0105 23:18:33.301732 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-2527p" podStartSLOduration=2.301700476 podStartE2EDuration="2.301700476s" podCreationTimestamp="2026-01-05 23:18:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:18:33.289866483 +0000 UTC m=+5244.867364193" watchObservedRunningTime="2026-01-05 23:18:33.301700476 +0000 UTC m=+5244.879198176" Jan 05 23:18:34 crc kubenswrapper[4910]: I0105 23:18:34.756238 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:18:34 crc kubenswrapper[4910]: I0105 23:18:34.840895 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76554fcc87-lm25j"] Jan 05 23:18:34 crc kubenswrapper[4910]: I0105 23:18:34.844184 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-76554fcc87-lm25j" podUID="87645fe7-bba7-4b33-a47f-44949bb8e28b" containerName="dnsmasq-dns" containerID="cri-o://dfca0a76a4c0728a569ed4f7f0d64a2d878e53597d2a6aa9e3872c25b5573806" gracePeriod=10 Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.285557 4910 generic.go:334] "Generic (PLEG): container finished" podID="87645fe7-bba7-4b33-a47f-44949bb8e28b" containerID="dfca0a76a4c0728a569ed4f7f0d64a2d878e53597d2a6aa9e3872c25b5573806" exitCode=0 Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.285600 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76554fcc87-lm25j" event={"ID":"87645fe7-bba7-4b33-a47f-44949bb8e28b","Type":"ContainerDied","Data":"dfca0a76a4c0728a569ed4f7f0d64a2d878e53597d2a6aa9e3872c25b5573806"} Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.286050 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76554fcc87-lm25j" event={"ID":"87645fe7-bba7-4b33-a47f-44949bb8e28b","Type":"ContainerDied","Data":"a71a687af714fef4bccf5885682f5a2a6ab6bdebf16dddf19497ac3e998a6745"} Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.286070 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a71a687af714fef4bccf5885682f5a2a6ab6bdebf16dddf19497ac3e998a6745" Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.312808 4910 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.443526 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-ovsdbserver-sb\") pod \"87645fe7-bba7-4b33-a47f-44949bb8e28b\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.443748 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j674n\" (UniqueName: \"kubernetes.io/projected/87645fe7-bba7-4b33-a47f-44949bb8e28b-kube-api-access-j674n\") pod \"87645fe7-bba7-4b33-a47f-44949bb8e28b\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.443819 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-config\") pod \"87645fe7-bba7-4b33-a47f-44949bb8e28b\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.444202 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-dns-svc\") pod \"87645fe7-bba7-4b33-a47f-44949bb8e28b\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.444284 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-ovsdbserver-nb\") pod \"87645fe7-bba7-4b33-a47f-44949bb8e28b\" (UID: \"87645fe7-bba7-4b33-a47f-44949bb8e28b\") " Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.451099 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87645fe7-bba7-4b33-a47f-44949bb8e28b-kube-api-access-j674n" (OuterVolumeSpecName: "kube-api-access-j674n") pod "87645fe7-bba7-4b33-a47f-44949bb8e28b" (UID: "87645fe7-bba7-4b33-a47f-44949bb8e28b"). InnerVolumeSpecName "kube-api-access-j674n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.486402 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "87645fe7-bba7-4b33-a47f-44949bb8e28b" (UID: "87645fe7-bba7-4b33-a47f-44949bb8e28b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.486968 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "87645fe7-bba7-4b33-a47f-44949bb8e28b" (UID: "87645fe7-bba7-4b33-a47f-44949bb8e28b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.487827 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-config" (OuterVolumeSpecName: "config") pod "87645fe7-bba7-4b33-a47f-44949bb8e28b" (UID: "87645fe7-bba7-4b33-a47f-44949bb8e28b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.495678 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "87645fe7-bba7-4b33-a47f-44949bb8e28b" (UID: "87645fe7-bba7-4b33-a47f-44949bb8e28b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.545847 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.545887 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.545901 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.545916 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j674n\" (UniqueName: \"kubernetes.io/projected/87645fe7-bba7-4b33-a47f-44949bb8e28b-kube-api-access-j674n\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:35 crc kubenswrapper[4910]: I0105 23:18:35.545928 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87645fe7-bba7-4b33-a47f-44949bb8e28b-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:36 crc kubenswrapper[4910]: I0105 23:18:36.302774 4910 generic.go:334] "Generic (PLEG): container finished" podID="00d270eb-35a0-49cc-b90a-2f0b0c0c2acf" containerID="c92fb655cbf1202d690e79f6136e212ae5edb49b92b2c2ddbcef8ff18b8f3fc4" exitCode=0 Jan 05 23:18:36 crc kubenswrapper[4910]: I0105 23:18:36.302830 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2527p" event={"ID":"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf","Type":"ContainerDied","Data":"c92fb655cbf1202d690e79f6136e212ae5edb49b92b2c2ddbcef8ff18b8f3fc4"} Jan 05 23:18:36 crc kubenswrapper[4910]: I0105 23:18:36.302942 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76554fcc87-lm25j" Jan 05 23:18:36 crc kubenswrapper[4910]: I0105 23:18:36.371948 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76554fcc87-lm25j"] Jan 05 23:18:36 crc kubenswrapper[4910]: I0105 23:18:36.381301 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-76554fcc87-lm25j"] Jan 05 23:18:36 crc kubenswrapper[4910]: I0105 23:18:36.740652 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87645fe7-bba7-4b33-a47f-44949bb8e28b" path="/var/lib/kubelet/pods/87645fe7-bba7-4b33-a47f-44949bb8e28b/volumes" Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.782557 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.889021 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-combined-ca-bundle\") pod \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.889074 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-fernet-keys\") pod \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.890082 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-config-data\") pod \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.890277 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmfzm\" (UniqueName: \"kubernetes.io/projected/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-kube-api-access-nmfzm\") pod \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.890442 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-credential-keys\") pod \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.890628 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-scripts\") pod \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\" (UID: \"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf\") " Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.897977 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "00d270eb-35a0-49cc-b90a-2f0b0c0c2acf" (UID: "00d270eb-35a0-49cc-b90a-2f0b0c0c2acf"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.898258 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-scripts" (OuterVolumeSpecName: "scripts") pod "00d270eb-35a0-49cc-b90a-2f0b0c0c2acf" (UID: "00d270eb-35a0-49cc-b90a-2f0b0c0c2acf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.898826 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "00d270eb-35a0-49cc-b90a-2f0b0c0c2acf" (UID: "00d270eb-35a0-49cc-b90a-2f0b0c0c2acf"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.904182 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-kube-api-access-nmfzm" (OuterVolumeSpecName: "kube-api-access-nmfzm") pod "00d270eb-35a0-49cc-b90a-2f0b0c0c2acf" (UID: "00d270eb-35a0-49cc-b90a-2f0b0c0c2acf"). InnerVolumeSpecName "kube-api-access-nmfzm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.922185 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-config-data" (OuterVolumeSpecName: "config-data") pod "00d270eb-35a0-49cc-b90a-2f0b0c0c2acf" (UID: "00d270eb-35a0-49cc-b90a-2f0b0c0c2acf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.926620 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "00d270eb-35a0-49cc-b90a-2f0b0c0c2acf" (UID: "00d270eb-35a0-49cc-b90a-2f0b0c0c2acf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.993710 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.993758 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmfzm\" (UniqueName: \"kubernetes.io/projected/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-kube-api-access-nmfzm\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.993770 4910 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.993780 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.993789 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:37 crc kubenswrapper[4910]: I0105 23:18:37.993799 4910 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.332177 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2527p" event={"ID":"00d270eb-35a0-49cc-b90a-2f0b0c0c2acf","Type":"ContainerDied","Data":"d35d875f892ef917b3db0e7b25c707cee6698d6426bcbec5e73f441975648c36"} Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.332634 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d35d875f892ef917b3db0e7b25c707cee6698d6426bcbec5e73f441975648c36" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 
23:18:38.332733 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-2527p" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.565801 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-867597f569-css8k"] Jan 05 23:18:38 crc kubenswrapper[4910]: E0105 23:18:38.566473 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87645fe7-bba7-4b33-a47f-44949bb8e28b" containerName="dnsmasq-dns" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.566501 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="87645fe7-bba7-4b33-a47f-44949bb8e28b" containerName="dnsmasq-dns" Jan 05 23:18:38 crc kubenswrapper[4910]: E0105 23:18:38.566573 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00d270eb-35a0-49cc-b90a-2f0b0c0c2acf" containerName="keystone-bootstrap" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.567214 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="00d270eb-35a0-49cc-b90a-2f0b0c0c2acf" containerName="keystone-bootstrap" Jan 05 23:18:38 crc kubenswrapper[4910]: E0105 23:18:38.567248 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87645fe7-bba7-4b33-a47f-44949bb8e28b" containerName="init" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.567257 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="87645fe7-bba7-4b33-a47f-44949bb8e28b" containerName="init" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.567461 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="87645fe7-bba7-4b33-a47f-44949bb8e28b" containerName="dnsmasq-dns" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.567491 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="00d270eb-35a0-49cc-b90a-2f0b0c0c2acf" containerName="keystone-bootstrap" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.568455 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.575571 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.575656 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.575598 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.575943 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hll75" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.606072 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-config-data\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.606177 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-scripts\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.606290 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-credential-keys\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.606327 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-combined-ca-bundle\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.606770 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrf5t\" (UniqueName: \"kubernetes.io/projected/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-kube-api-access-rrf5t\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.606869 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-fernet-keys\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.637440 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-867597f569-css8k"] Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.708600 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrf5t\" (UniqueName: \"kubernetes.io/projected/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-kube-api-access-rrf5t\") pod 
\"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.708653 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-fernet-keys\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.708698 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-config-data\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.708724 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-scripts\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.708777 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-credential-keys\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.708799 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-combined-ca-bundle\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.713586 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-credential-keys\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.713590 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-combined-ca-bundle\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.715274 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-config-data\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.716027 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-fernet-keys\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.730724 
4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-scripts\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.735209 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrf5t\" (UniqueName: \"kubernetes.io/projected/a890ac8e-7d88-463b-90e8-36f55b2c3b6c-kube-api-access-rrf5t\") pod \"keystone-867597f569-css8k\" (UID: \"a890ac8e-7d88-463b-90e8-36f55b2c3b6c\") " pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:38 crc kubenswrapper[4910]: I0105 23:18:38.903623 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:39 crc kubenswrapper[4910]: I0105 23:18:39.362033 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-867597f569-css8k"] Jan 05 23:18:39 crc kubenswrapper[4910]: W0105 23:18:39.372151 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda890ac8e_7d88_463b_90e8_36f55b2c3b6c.slice/crio-89f2a61b6c1b3e1c8ed76331b921fcd065d3a160baa35db40b6f8b2ced7a64ad WatchSource:0}: Error finding container 89f2a61b6c1b3e1c8ed76331b921fcd065d3a160baa35db40b6f8b2ced7a64ad: Status 404 returned error can't find the container with id 89f2a61b6c1b3e1c8ed76331b921fcd065d3a160baa35db40b6f8b2ced7a64ad Jan 05 23:18:40 crc kubenswrapper[4910]: I0105 23:18:40.356750 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-867597f569-css8k" event={"ID":"a890ac8e-7d88-463b-90e8-36f55b2c3b6c","Type":"ContainerStarted","Data":"e07ced66d99328ae12d3ed2a34d45d1bea94ee5c84e6a8da82222119fb271008"} Jan 05 23:18:40 crc kubenswrapper[4910]: I0105 23:18:40.357161 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-867597f569-css8k" event={"ID":"a890ac8e-7d88-463b-90e8-36f55b2c3b6c","Type":"ContainerStarted","Data":"89f2a61b6c1b3e1c8ed76331b921fcd065d3a160baa35db40b6f8b2ced7a64ad"} Jan 05 23:18:40 crc kubenswrapper[4910]: I0105 23:18:40.357183 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-867597f569-css8k" Jan 05 23:18:40 crc kubenswrapper[4910]: I0105 23:18:40.400823 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-867597f569-css8k" podStartSLOduration=2.400790996 podStartE2EDuration="2.400790996s" podCreationTimestamp="2026-01-05 23:18:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:18:40.380483763 +0000 UTC m=+5251.957981483" watchObservedRunningTime="2026-01-05 23:18:40.400790996 +0000 UTC m=+5251.978288706" Jan 05 23:18:40 crc kubenswrapper[4910]: I0105 23:18:40.953175 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:18:40 crc kubenswrapper[4910]: I0105 23:18:40.953280 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:18:44 crc kubenswrapper[4910]: I0105 23:18:44.645953 4910 scope.go:117] "RemoveContainer" containerID="86fd8c561118e166c30daa69f63d8fd4a7976f8c36857959ce87258c109c1c6c" Jan 05 23:18:44 crc kubenswrapper[4910]: I0105 23:18:44.694036 4910 scope.go:117] "RemoveContainer" containerID="262fd24133d16861ec119503b8b080e9eb4d699f023dd4eeb0bbce0a04fcabfb" Jan 05 23:18:44 crc kubenswrapper[4910]: I0105 23:18:44.743756 4910 scope.go:117] "RemoveContainer" containerID="9f3a450307ac313de983e69c9b42a5c0299d6d1a6a49278dda0c039ecaee21b1" Jan 05 23:18:44 crc kubenswrapper[4910]: I0105 23:18:44.783975 4910 scope.go:117] "RemoveContainer" containerID="a228175b818349bbb004a6d8b48a94b20f44c05354e283a0fc587e1d9a29c91c" Jan 05 23:19:10 crc kubenswrapper[4910]: I0105 23:19:10.952545 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:19:10 crc kubenswrapper[4910]: I0105 23:19:10.953246 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:19:10 crc kubenswrapper[4910]: I0105 23:19:10.953327 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 23:19:10 crc kubenswrapper[4910]: I0105 23:19:10.954570 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 23:19:10 crc kubenswrapper[4910]: I0105 23:19:10.954671 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" gracePeriod=600 Jan 05 23:19:11 crc kubenswrapper[4910]: I0105 23:19:11.020517 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-867597f569-css8k" Jan 05 23:19:11 crc kubenswrapper[4910]: E0105 23:19:11.116807 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:19:11 crc kubenswrapper[4910]: I0105 23:19:11.712452 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" 
containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" exitCode=0 Jan 05 23:19:11 crc kubenswrapper[4910]: I0105 23:19:11.712498 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e"} Jan 05 23:19:11 crc kubenswrapper[4910]: I0105 23:19:11.712536 4910 scope.go:117] "RemoveContainer" containerID="1b4947f16488761156b000cbce3970d8b169fa16ff0cca2579226d719a03df0b" Jan 05 23:19:11 crc kubenswrapper[4910]: I0105 23:19:11.713021 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:19:11 crc kubenswrapper[4910]: E0105 23:19:11.713395 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:19:13 crc kubenswrapper[4910]: I0105 23:19:13.875242 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 05 23:19:13 crc kubenswrapper[4910]: I0105 23:19:13.877666 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 05 23:19:13 crc kubenswrapper[4910]: I0105 23:19:13.880769 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 05 23:19:13 crc kubenswrapper[4910]: I0105 23:19:13.882516 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 05 23:19:13 crc kubenswrapper[4910]: I0105 23:19:13.883935 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-h9jh9" Jan 05 23:19:13 crc kubenswrapper[4910]: I0105 23:19:13.895482 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 05 23:19:14 crc kubenswrapper[4910]: I0105 23:19:14.033382 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3f781aaf-1b6a-4c73-bfed-44881ba13710-openstack-config-secret\") pod \"openstackclient\" (UID: \"3f781aaf-1b6a-4c73-bfed-44881ba13710\") " pod="openstack/openstackclient" Jan 05 23:19:14 crc kubenswrapper[4910]: I0105 23:19:14.033489 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4d8k\" (UniqueName: \"kubernetes.io/projected/3f781aaf-1b6a-4c73-bfed-44881ba13710-kube-api-access-h4d8k\") pod \"openstackclient\" (UID: \"3f781aaf-1b6a-4c73-bfed-44881ba13710\") " pod="openstack/openstackclient" Jan 05 23:19:14 crc kubenswrapper[4910]: I0105 23:19:14.033700 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3f781aaf-1b6a-4c73-bfed-44881ba13710-openstack-config\") pod \"openstackclient\" (UID: \"3f781aaf-1b6a-4c73-bfed-44881ba13710\") " pod="openstack/openstackclient" Jan 05 23:19:14 crc kubenswrapper[4910]: I0105 23:19:14.136012 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3f781aaf-1b6a-4c73-bfed-44881ba13710-openstack-config-secret\") pod \"openstackclient\" (UID: \"3f781aaf-1b6a-4c73-bfed-44881ba13710\") " pod="openstack/openstackclient" Jan 05 23:19:14 crc kubenswrapper[4910]: I0105 23:19:14.136090 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4d8k\" (UniqueName: \"kubernetes.io/projected/3f781aaf-1b6a-4c73-bfed-44881ba13710-kube-api-access-h4d8k\") pod \"openstackclient\" (UID: \"3f781aaf-1b6a-4c73-bfed-44881ba13710\") " pod="openstack/openstackclient" Jan 05 23:19:14 crc kubenswrapper[4910]: I0105 23:19:14.136276 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3f781aaf-1b6a-4c73-bfed-44881ba13710-openstack-config\") pod \"openstackclient\" (UID: \"3f781aaf-1b6a-4c73-bfed-44881ba13710\") " pod="openstack/openstackclient" Jan 05 23:19:14 crc kubenswrapper[4910]: I0105 23:19:14.137973 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3f781aaf-1b6a-4c73-bfed-44881ba13710-openstack-config\") pod \"openstackclient\" (UID: \"3f781aaf-1b6a-4c73-bfed-44881ba13710\") " pod="openstack/openstackclient" Jan 05 23:19:14 crc kubenswrapper[4910]: I0105 23:19:14.145867 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3f781aaf-1b6a-4c73-bfed-44881ba13710-openstack-config-secret\") pod \"openstackclient\" (UID: \"3f781aaf-1b6a-4c73-bfed-44881ba13710\") " pod="openstack/openstackclient" Jan 05 23:19:14 crc kubenswrapper[4910]: I0105 23:19:14.168350 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4d8k\" (UniqueName: \"kubernetes.io/projected/3f781aaf-1b6a-4c73-bfed-44881ba13710-kube-api-access-h4d8k\") pod \"openstackclient\" (UID: \"3f781aaf-1b6a-4c73-bfed-44881ba13710\") " pod="openstack/openstackclient" Jan 05 23:19:14 crc kubenswrapper[4910]: I0105 23:19:14.215316 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 05 23:19:14 crc kubenswrapper[4910]: I0105 23:19:14.786189 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 05 23:19:15 crc kubenswrapper[4910]: I0105 23:19:15.763937 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"3f781aaf-1b6a-4c73-bfed-44881ba13710","Type":"ContainerStarted","Data":"c98b5115f6c62650a00bd0840adc489bbc03e6c09b09e3e89d6da607760b0f73"} Jan 05 23:19:15 crc kubenswrapper[4910]: I0105 23:19:15.764478 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"3f781aaf-1b6a-4c73-bfed-44881ba13710","Type":"ContainerStarted","Data":"51af2777350cef596aa861fc915af2d49c259bbde4386cd09ea1a906142a95f4"} Jan 05 23:19:15 crc kubenswrapper[4910]: I0105 23:19:15.798301 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.798266273 podStartE2EDuration="2.798266273s" podCreationTimestamp="2026-01-05 23:19:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:19:15.78521851 +0000 UTC m=+5287.362716260" watchObservedRunningTime="2026-01-05 23:19:15.798266273 +0000 UTC m=+5287.375763973" Jan 05 23:19:23 crc kubenswrapper[4910]: I0105 23:19:23.723168 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:19:23 crc kubenswrapper[4910]: E0105 23:19:23.724345 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:19:38 crc kubenswrapper[4910]: I0105 23:19:38.726539 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:19:38 crc kubenswrapper[4910]: E0105 23:19:38.727691 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:19:44 crc kubenswrapper[4910]: I0105 23:19:44.983202 4910 scope.go:117] "RemoveContainer" containerID="28e529ed0326863121da9918c08a8d2325dda120b46be4de1ecb71f4cb0e1bf3" Jan 05 23:19:45 crc kubenswrapper[4910]: I0105 23:19:45.025472 4910 scope.go:117] "RemoveContainer" containerID="86c46c207b16005f4b2d841a6704c8ab9922d5b301a9d1d884c8411e1529ff6e" Jan 05 23:19:45 crc kubenswrapper[4910]: I0105 23:19:45.083493 4910 scope.go:117] "RemoveContainer" containerID="9fcacaa850bdeda006c85e393c14525b24ec5f17067751f6c11e958e0c747ad5" Jan 05 23:19:45 crc kubenswrapper[4910]: I0105 23:19:45.134731 4910 scope.go:117] "RemoveContainer" containerID="587b71da4fbb3281df05fa00fce9c4aa0616966feda701069ff8a814c8c6c9dc" Jan 05 23:19:50 crc kubenswrapper[4910]: I0105 23:19:50.722553 4910 scope.go:117] "RemoveContainer" 
containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:19:50 crc kubenswrapper[4910]: E0105 23:19:50.724227 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:20:03 crc kubenswrapper[4910]: I0105 23:20:03.721726 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:20:03 crc kubenswrapper[4910]: E0105 23:20:03.722849 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:20:09 crc kubenswrapper[4910]: I0105 23:20:09.600552 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-24nfq"] Jan 05 23:20:09 crc kubenswrapper[4910]: I0105 23:20:09.604948 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:09 crc kubenswrapper[4910]: I0105 23:20:09.625706 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-24nfq"] Jan 05 23:20:09 crc kubenswrapper[4910]: I0105 23:20:09.659174 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rx2x2\" (UniqueName: \"kubernetes.io/projected/8835948c-7425-4b43-abcd-c152feb46c72-kube-api-access-rx2x2\") pod \"redhat-operators-24nfq\" (UID: \"8835948c-7425-4b43-abcd-c152feb46c72\") " pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:09 crc kubenswrapper[4910]: I0105 23:20:09.659639 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8835948c-7425-4b43-abcd-c152feb46c72-catalog-content\") pod \"redhat-operators-24nfq\" (UID: \"8835948c-7425-4b43-abcd-c152feb46c72\") " pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:09 crc kubenswrapper[4910]: I0105 23:20:09.659743 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8835948c-7425-4b43-abcd-c152feb46c72-utilities\") pod \"redhat-operators-24nfq\" (UID: \"8835948c-7425-4b43-abcd-c152feb46c72\") " pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:09 crc kubenswrapper[4910]: I0105 23:20:09.762109 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8835948c-7425-4b43-abcd-c152feb46c72-catalog-content\") pod \"redhat-operators-24nfq\" (UID: \"8835948c-7425-4b43-abcd-c152feb46c72\") " pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:09 crc kubenswrapper[4910]: I0105 23:20:09.762255 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/8835948c-7425-4b43-abcd-c152feb46c72-utilities\") pod \"redhat-operators-24nfq\" (UID: \"8835948c-7425-4b43-abcd-c152feb46c72\") " pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:09 crc kubenswrapper[4910]: I0105 23:20:09.763089 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8835948c-7425-4b43-abcd-c152feb46c72-utilities\") pod \"redhat-operators-24nfq\" (UID: \"8835948c-7425-4b43-abcd-c152feb46c72\") " pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:09 crc kubenswrapper[4910]: I0105 23:20:09.763178 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8835948c-7425-4b43-abcd-c152feb46c72-catalog-content\") pod \"redhat-operators-24nfq\" (UID: \"8835948c-7425-4b43-abcd-c152feb46c72\") " pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:09 crc kubenswrapper[4910]: I0105 23:20:09.763194 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rx2x2\" (UniqueName: \"kubernetes.io/projected/8835948c-7425-4b43-abcd-c152feb46c72-kube-api-access-rx2x2\") pod \"redhat-operators-24nfq\" (UID: \"8835948c-7425-4b43-abcd-c152feb46c72\") " pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:09 crc kubenswrapper[4910]: I0105 23:20:09.785131 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rx2x2\" (UniqueName: \"kubernetes.io/projected/8835948c-7425-4b43-abcd-c152feb46c72-kube-api-access-rx2x2\") pod \"redhat-operators-24nfq\" (UID: \"8835948c-7425-4b43-abcd-c152feb46c72\") " pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:09 crc kubenswrapper[4910]: I0105 23:20:09.951234 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:10 crc kubenswrapper[4910]: I0105 23:20:10.407036 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-24nfq"] Jan 05 23:20:10 crc kubenswrapper[4910]: E0105 23:20:10.813693 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8835948c_7425_4b43_abcd_c152feb46c72.slice/crio-4807d94311e6aab1f76338c9b7f06452569b9dcf0ed5bdc523ebc06df9c57feb.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8835948c_7425_4b43_abcd_c152feb46c72.slice/crio-conmon-4807d94311e6aab1f76338c9b7f06452569b9dcf0ed5bdc523ebc06df9c57feb.scope\": RecentStats: unable to find data in memory cache]" Jan 05 23:20:11 crc kubenswrapper[4910]: I0105 23:20:11.396282 4910 generic.go:334] "Generic (PLEG): container finished" podID="8835948c-7425-4b43-abcd-c152feb46c72" containerID="4807d94311e6aab1f76338c9b7f06452569b9dcf0ed5bdc523ebc06df9c57feb" exitCode=0 Jan 05 23:20:11 crc kubenswrapper[4910]: I0105 23:20:11.396352 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-24nfq" event={"ID":"8835948c-7425-4b43-abcd-c152feb46c72","Type":"ContainerDied","Data":"4807d94311e6aab1f76338c9b7f06452569b9dcf0ed5bdc523ebc06df9c57feb"} Jan 05 23:20:11 crc kubenswrapper[4910]: I0105 23:20:11.396385 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-24nfq" event={"ID":"8835948c-7425-4b43-abcd-c152feb46c72","Type":"ContainerStarted","Data":"baaa0716939eeba5e9cd3237f06dcb9dea9dbb8512b11c6d014c0ad37e4ecd20"} Jan 05 23:20:13 crc kubenswrapper[4910]: I0105 23:20:13.420073 4910 generic.go:334] "Generic (PLEG): container finished" podID="8835948c-7425-4b43-abcd-c152feb46c72" containerID="89089e3721f7e534b7d4e20a0b6aba48ac252bc268d2b0734c8d873b0a35f335" exitCode=0 Jan 05 23:20:13 crc kubenswrapper[4910]: I0105 23:20:13.421236 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-24nfq" event={"ID":"8835948c-7425-4b43-abcd-c152feb46c72","Type":"ContainerDied","Data":"89089e3721f7e534b7d4e20a0b6aba48ac252bc268d2b0734c8d873b0a35f335"} Jan 05 23:20:14 crc kubenswrapper[4910]: E0105 23:20:14.424034 4910 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.166:58258->38.102.83.166:40365: write tcp 38.102.83.166:58258->38.102.83.166:40365: write: broken pipe Jan 05 23:20:14 crc kubenswrapper[4910]: I0105 23:20:14.433737 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-24nfq" event={"ID":"8835948c-7425-4b43-abcd-c152feb46c72","Type":"ContainerStarted","Data":"f6f5ced66878df5dc97b6a93298ee0ff18b7014f6f1a944deda1fe6ae4d06289"} Jan 05 23:20:14 crc kubenswrapper[4910]: I0105 23:20:14.471203 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-24nfq" podStartSLOduration=2.865694911 podStartE2EDuration="5.471092501s" podCreationTimestamp="2026-01-05 23:20:09 +0000 UTC" firstStartedPulling="2026-01-05 23:20:11.398266224 +0000 UTC m=+5342.975763894" lastFinishedPulling="2026-01-05 23:20:14.003663804 +0000 UTC m=+5345.581161484" observedRunningTime="2026-01-05 23:20:14.459841793 +0000 UTC m=+5346.037339473" watchObservedRunningTime="2026-01-05 
23:20:14.471092501 +0000 UTC m=+5346.048590211" Jan 05 23:20:14 crc kubenswrapper[4910]: I0105 23:20:14.721803 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:20:14 crc kubenswrapper[4910]: E0105 23:20:14.722156 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:20:19 crc kubenswrapper[4910]: I0105 23:20:19.951947 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:19 crc kubenswrapper[4910]: I0105 23:20:19.952455 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:21 crc kubenswrapper[4910]: I0105 23:20:21.005653 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-24nfq" podUID="8835948c-7425-4b43-abcd-c152feb46c72" containerName="registry-server" probeResult="failure" output=< Jan 05 23:20:21 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Jan 05 23:20:21 crc kubenswrapper[4910]: > Jan 05 23:20:28 crc kubenswrapper[4910]: I0105 23:20:28.728148 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:20:28 crc kubenswrapper[4910]: E0105 23:20:28.729010 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:20:29 crc kubenswrapper[4910]: I0105 23:20:29.828157 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5h9mm"] Jan 05 23:20:29 crc kubenswrapper[4910]: I0105 23:20:29.830109 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:29 crc kubenswrapper[4910]: I0105 23:20:29.856991 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5h9mm"] Jan 05 23:20:29 crc kubenswrapper[4910]: I0105 23:20:29.938756 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmxmt\" (UniqueName: \"kubernetes.io/projected/1fd882ef-1f78-4e52-bc06-24be6766fb1f-kube-api-access-wmxmt\") pod \"certified-operators-5h9mm\" (UID: \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\") " pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:29 crc kubenswrapper[4910]: I0105 23:20:29.938901 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1fd882ef-1f78-4e52-bc06-24be6766fb1f-utilities\") pod \"certified-operators-5h9mm\" (UID: \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\") " pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:29 crc kubenswrapper[4910]: I0105 23:20:29.938987 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1fd882ef-1f78-4e52-bc06-24be6766fb1f-catalog-content\") pod \"certified-operators-5h9mm\" (UID: \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\") " pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:30 crc kubenswrapper[4910]: I0105 23:20:30.007609 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:30 crc kubenswrapper[4910]: I0105 23:20:30.040751 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1fd882ef-1f78-4e52-bc06-24be6766fb1f-catalog-content\") pod \"certified-operators-5h9mm\" (UID: \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\") " pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:30 crc kubenswrapper[4910]: I0105 23:20:30.040857 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmxmt\" (UniqueName: \"kubernetes.io/projected/1fd882ef-1f78-4e52-bc06-24be6766fb1f-kube-api-access-wmxmt\") pod \"certified-operators-5h9mm\" (UID: \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\") " pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:30 crc kubenswrapper[4910]: I0105 23:20:30.040928 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1fd882ef-1f78-4e52-bc06-24be6766fb1f-utilities\") pod \"certified-operators-5h9mm\" (UID: \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\") " pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:30 crc kubenswrapper[4910]: I0105 23:20:30.041421 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1fd882ef-1f78-4e52-bc06-24be6766fb1f-catalog-content\") pod \"certified-operators-5h9mm\" (UID: \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\") " pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:30 crc kubenswrapper[4910]: I0105 23:20:30.041513 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1fd882ef-1f78-4e52-bc06-24be6766fb1f-utilities\") pod \"certified-operators-5h9mm\" (UID: 
\"1fd882ef-1f78-4e52-bc06-24be6766fb1f\") " pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:30 crc kubenswrapper[4910]: I0105 23:20:30.068601 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:30 crc kubenswrapper[4910]: I0105 23:20:30.069285 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmxmt\" (UniqueName: \"kubernetes.io/projected/1fd882ef-1f78-4e52-bc06-24be6766fb1f-kube-api-access-wmxmt\") pod \"certified-operators-5h9mm\" (UID: \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\") " pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:30 crc kubenswrapper[4910]: I0105 23:20:30.163953 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:30 crc kubenswrapper[4910]: W0105 23:20:30.742316 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1fd882ef_1f78_4e52_bc06_24be6766fb1f.slice/crio-bbd9e4a2142dc0a16151c6d4689bbb79881d80ca568b9d5c64ff93284168615d WatchSource:0}: Error finding container bbd9e4a2142dc0a16151c6d4689bbb79881d80ca568b9d5c64ff93284168615d: Status 404 returned error can't find the container with id bbd9e4a2142dc0a16151c6d4689bbb79881d80ca568b9d5c64ff93284168615d Jan 05 23:20:30 crc kubenswrapper[4910]: I0105 23:20:30.744461 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5h9mm"] Jan 05 23:20:31 crc kubenswrapper[4910]: I0105 23:20:31.613545 4910 generic.go:334] "Generic (PLEG): container finished" podID="1fd882ef-1f78-4e52-bc06-24be6766fb1f" containerID="7f68e87bc32cd90459bb982917b8e1074bb78158067c0fd7ccdfe3bc6e34c928" exitCode=0 Jan 05 23:20:31 crc kubenswrapper[4910]: I0105 23:20:31.613661 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5h9mm" event={"ID":"1fd882ef-1f78-4e52-bc06-24be6766fb1f","Type":"ContainerDied","Data":"7f68e87bc32cd90459bb982917b8e1074bb78158067c0fd7ccdfe3bc6e34c928"} Jan 05 23:20:31 crc kubenswrapper[4910]: I0105 23:20:31.613972 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5h9mm" event={"ID":"1fd882ef-1f78-4e52-bc06-24be6766fb1f","Type":"ContainerStarted","Data":"bbd9e4a2142dc0a16151c6d4689bbb79881d80ca568b9d5c64ff93284168615d"} Jan 05 23:20:32 crc kubenswrapper[4910]: I0105 23:20:32.624966 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5h9mm" event={"ID":"1fd882ef-1f78-4e52-bc06-24be6766fb1f","Type":"ContainerStarted","Data":"f4c118069fe70af815499316d2c36909cfe6313e788b3f52cc72b11a5b1869f5"} Jan 05 23:20:33 crc kubenswrapper[4910]: I0105 23:20:33.636426 4910 generic.go:334] "Generic (PLEG): container finished" podID="1fd882ef-1f78-4e52-bc06-24be6766fb1f" containerID="f4c118069fe70af815499316d2c36909cfe6313e788b3f52cc72b11a5b1869f5" exitCode=0 Jan 05 23:20:33 crc kubenswrapper[4910]: I0105 23:20:33.637202 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5h9mm" event={"ID":"1fd882ef-1f78-4e52-bc06-24be6766fb1f","Type":"ContainerDied","Data":"f4c118069fe70af815499316d2c36909cfe6313e788b3f52cc72b11a5b1869f5"} Jan 05 23:20:34 crc kubenswrapper[4910]: I0105 23:20:34.687839 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-5h9mm" event={"ID":"1fd882ef-1f78-4e52-bc06-24be6766fb1f","Type":"ContainerStarted","Data":"5e687ab21379e04c3458ef6700953c03e89aa97a268d5d0221f372b112e00c26"} Jan 05 23:20:34 crc kubenswrapper[4910]: I0105 23:20:34.717586 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5h9mm" podStartSLOduration=3.206785884 podStartE2EDuration="5.717562901s" podCreationTimestamp="2026-01-05 23:20:29 +0000 UTC" firstStartedPulling="2026-01-05 23:20:31.616814753 +0000 UTC m=+5363.194312423" lastFinishedPulling="2026-01-05 23:20:34.12759177 +0000 UTC m=+5365.705089440" observedRunningTime="2026-01-05 23:20:34.715308796 +0000 UTC m=+5366.292806466" watchObservedRunningTime="2026-01-05 23:20:34.717562901 +0000 UTC m=+5366.295060571" Jan 05 23:20:34 crc kubenswrapper[4910]: I0105 23:20:34.805716 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-24nfq"] Jan 05 23:20:34 crc kubenswrapper[4910]: I0105 23:20:34.805970 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-24nfq" podUID="8835948c-7425-4b43-abcd-c152feb46c72" containerName="registry-server" containerID="cri-o://f6f5ced66878df5dc97b6a93298ee0ff18b7014f6f1a944deda1fe6ae4d06289" gracePeriod=2 Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.245793 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.343806 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8835948c-7425-4b43-abcd-c152feb46c72-utilities\") pod \"8835948c-7425-4b43-abcd-c152feb46c72\" (UID: \"8835948c-7425-4b43-abcd-c152feb46c72\") " Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.343929 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8835948c-7425-4b43-abcd-c152feb46c72-catalog-content\") pod \"8835948c-7425-4b43-abcd-c152feb46c72\" (UID: \"8835948c-7425-4b43-abcd-c152feb46c72\") " Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.344052 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rx2x2\" (UniqueName: \"kubernetes.io/projected/8835948c-7425-4b43-abcd-c152feb46c72-kube-api-access-rx2x2\") pod \"8835948c-7425-4b43-abcd-c152feb46c72\" (UID: \"8835948c-7425-4b43-abcd-c152feb46c72\") " Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.344696 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8835948c-7425-4b43-abcd-c152feb46c72-utilities" (OuterVolumeSpecName: "utilities") pod "8835948c-7425-4b43-abcd-c152feb46c72" (UID: "8835948c-7425-4b43-abcd-c152feb46c72"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.356622 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8835948c-7425-4b43-abcd-c152feb46c72-kube-api-access-rx2x2" (OuterVolumeSpecName: "kube-api-access-rx2x2") pod "8835948c-7425-4b43-abcd-c152feb46c72" (UID: "8835948c-7425-4b43-abcd-c152feb46c72"). InnerVolumeSpecName "kube-api-access-rx2x2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.446929 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8835948c-7425-4b43-abcd-c152feb46c72-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.446985 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rx2x2\" (UniqueName: \"kubernetes.io/projected/8835948c-7425-4b43-abcd-c152feb46c72-kube-api-access-rx2x2\") on node \"crc\" DevicePath \"\"" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.457949 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8835948c-7425-4b43-abcd-c152feb46c72-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8835948c-7425-4b43-abcd-c152feb46c72" (UID: "8835948c-7425-4b43-abcd-c152feb46c72"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.548942 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8835948c-7425-4b43-abcd-c152feb46c72-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.697881 4910 generic.go:334] "Generic (PLEG): container finished" podID="8835948c-7425-4b43-abcd-c152feb46c72" containerID="f6f5ced66878df5dc97b6a93298ee0ff18b7014f6f1a944deda1fe6ae4d06289" exitCode=0 Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.697925 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-24nfq" event={"ID":"8835948c-7425-4b43-abcd-c152feb46c72","Type":"ContainerDied","Data":"f6f5ced66878df5dc97b6a93298ee0ff18b7014f6f1a944deda1fe6ae4d06289"} Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.697973 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-24nfq" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.698009 4910 scope.go:117] "RemoveContainer" containerID="f6f5ced66878df5dc97b6a93298ee0ff18b7014f6f1a944deda1fe6ae4d06289" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.697991 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-24nfq" event={"ID":"8835948c-7425-4b43-abcd-c152feb46c72","Type":"ContainerDied","Data":"baaa0716939eeba5e9cd3237f06dcb9dea9dbb8512b11c6d014c0ad37e4ecd20"} Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.718236 4910 scope.go:117] "RemoveContainer" containerID="89089e3721f7e534b7d4e20a0b6aba48ac252bc268d2b0734c8d873b0a35f335" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.738404 4910 scope.go:117] "RemoveContainer" containerID="4807d94311e6aab1f76338c9b7f06452569b9dcf0ed5bdc523ebc06df9c57feb" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.738406 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-24nfq"] Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.745322 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-24nfq"] Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.778589 4910 scope.go:117] "RemoveContainer" containerID="f6f5ced66878df5dc97b6a93298ee0ff18b7014f6f1a944deda1fe6ae4d06289" Jan 05 23:20:35 crc kubenswrapper[4910]: E0105 23:20:35.780743 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6f5ced66878df5dc97b6a93298ee0ff18b7014f6f1a944deda1fe6ae4d06289\": container with ID starting with f6f5ced66878df5dc97b6a93298ee0ff18b7014f6f1a944deda1fe6ae4d06289 not found: ID does not exist" containerID="f6f5ced66878df5dc97b6a93298ee0ff18b7014f6f1a944deda1fe6ae4d06289" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.780806 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6f5ced66878df5dc97b6a93298ee0ff18b7014f6f1a944deda1fe6ae4d06289"} err="failed to get container status \"f6f5ced66878df5dc97b6a93298ee0ff18b7014f6f1a944deda1fe6ae4d06289\": rpc error: code = NotFound desc = could not find container \"f6f5ced66878df5dc97b6a93298ee0ff18b7014f6f1a944deda1fe6ae4d06289\": container with ID starting with f6f5ced66878df5dc97b6a93298ee0ff18b7014f6f1a944deda1fe6ae4d06289 not found: ID does not exist" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.780841 4910 scope.go:117] "RemoveContainer" containerID="89089e3721f7e534b7d4e20a0b6aba48ac252bc268d2b0734c8d873b0a35f335" Jan 05 23:20:35 crc kubenswrapper[4910]: E0105 23:20:35.782136 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89089e3721f7e534b7d4e20a0b6aba48ac252bc268d2b0734c8d873b0a35f335\": container with ID starting with 89089e3721f7e534b7d4e20a0b6aba48ac252bc268d2b0734c8d873b0a35f335 not found: ID does not exist" containerID="89089e3721f7e534b7d4e20a0b6aba48ac252bc268d2b0734c8d873b0a35f335" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.782190 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89089e3721f7e534b7d4e20a0b6aba48ac252bc268d2b0734c8d873b0a35f335"} err="failed to get container status \"89089e3721f7e534b7d4e20a0b6aba48ac252bc268d2b0734c8d873b0a35f335\": rpc error: code = NotFound desc = could not find container 
\"89089e3721f7e534b7d4e20a0b6aba48ac252bc268d2b0734c8d873b0a35f335\": container with ID starting with 89089e3721f7e534b7d4e20a0b6aba48ac252bc268d2b0734c8d873b0a35f335 not found: ID does not exist" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.782222 4910 scope.go:117] "RemoveContainer" containerID="4807d94311e6aab1f76338c9b7f06452569b9dcf0ed5bdc523ebc06df9c57feb" Jan 05 23:20:35 crc kubenswrapper[4910]: E0105 23:20:35.783218 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4807d94311e6aab1f76338c9b7f06452569b9dcf0ed5bdc523ebc06df9c57feb\": container with ID starting with 4807d94311e6aab1f76338c9b7f06452569b9dcf0ed5bdc523ebc06df9c57feb not found: ID does not exist" containerID="4807d94311e6aab1f76338c9b7f06452569b9dcf0ed5bdc523ebc06df9c57feb" Jan 05 23:20:35 crc kubenswrapper[4910]: I0105 23:20:35.783253 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4807d94311e6aab1f76338c9b7f06452569b9dcf0ed5bdc523ebc06df9c57feb"} err="failed to get container status \"4807d94311e6aab1f76338c9b7f06452569b9dcf0ed5bdc523ebc06df9c57feb\": rpc error: code = NotFound desc = could not find container \"4807d94311e6aab1f76338c9b7f06452569b9dcf0ed5bdc523ebc06df9c57feb\": container with ID starting with 4807d94311e6aab1f76338c9b7f06452569b9dcf0ed5bdc523ebc06df9c57feb not found: ID does not exist" Jan 05 23:20:36 crc kubenswrapper[4910]: I0105 23:20:36.739071 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8835948c-7425-4b43-abcd-c152feb46c72" path="/var/lib/kubelet/pods/8835948c-7425-4b43-abcd-c152feb46c72/volumes" Jan 05 23:20:40 crc kubenswrapper[4910]: I0105 23:20:40.169220 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:40 crc kubenswrapper[4910]: I0105 23:20:40.170622 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:40 crc kubenswrapper[4910]: I0105 23:20:40.218829 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:40 crc kubenswrapper[4910]: I0105 23:20:40.794156 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:42 crc kubenswrapper[4910]: I0105 23:20:42.606136 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5h9mm"] Jan 05 23:20:42 crc kubenswrapper[4910]: I0105 23:20:42.722485 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:20:42 crc kubenswrapper[4910]: E0105 23:20:42.722804 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:20:42 crc kubenswrapper[4910]: I0105 23:20:42.763310 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5h9mm" podUID="1fd882ef-1f78-4e52-bc06-24be6766fb1f" 
containerName="registry-server" containerID="cri-o://5e687ab21379e04c3458ef6700953c03e89aa97a268d5d0221f372b112e00c26" gracePeriod=2 Jan 05 23:20:43 crc kubenswrapper[4910]: I0105 23:20:43.782736 4910 generic.go:334] "Generic (PLEG): container finished" podID="1fd882ef-1f78-4e52-bc06-24be6766fb1f" containerID="5e687ab21379e04c3458ef6700953c03e89aa97a268d5d0221f372b112e00c26" exitCode=0 Jan 05 23:20:43 crc kubenswrapper[4910]: I0105 23:20:43.782776 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5h9mm" event={"ID":"1fd882ef-1f78-4e52-bc06-24be6766fb1f","Type":"ContainerDied","Data":"5e687ab21379e04c3458ef6700953c03e89aa97a268d5d0221f372b112e00c26"} Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.291311 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.337072 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmxmt\" (UniqueName: \"kubernetes.io/projected/1fd882ef-1f78-4e52-bc06-24be6766fb1f-kube-api-access-wmxmt\") pod \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\" (UID: \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\") " Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.337162 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1fd882ef-1f78-4e52-bc06-24be6766fb1f-catalog-content\") pod \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\" (UID: \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\") " Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.337314 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1fd882ef-1f78-4e52-bc06-24be6766fb1f-utilities\") pod \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\" (UID: \"1fd882ef-1f78-4e52-bc06-24be6766fb1f\") " Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.338550 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1fd882ef-1f78-4e52-bc06-24be6766fb1f-utilities" (OuterVolumeSpecName: "utilities") pod "1fd882ef-1f78-4e52-bc06-24be6766fb1f" (UID: "1fd882ef-1f78-4e52-bc06-24be6766fb1f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.364855 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fd882ef-1f78-4e52-bc06-24be6766fb1f-kube-api-access-wmxmt" (OuterVolumeSpecName: "kube-api-access-wmxmt") pod "1fd882ef-1f78-4e52-bc06-24be6766fb1f" (UID: "1fd882ef-1f78-4e52-bc06-24be6766fb1f"). InnerVolumeSpecName "kube-api-access-wmxmt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.414374 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1fd882ef-1f78-4e52-bc06-24be6766fb1f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1fd882ef-1f78-4e52-bc06-24be6766fb1f" (UID: "1fd882ef-1f78-4e52-bc06-24be6766fb1f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.439597 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1fd882ef-1f78-4e52-bc06-24be6766fb1f-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.439644 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmxmt\" (UniqueName: \"kubernetes.io/projected/1fd882ef-1f78-4e52-bc06-24be6766fb1f-kube-api-access-wmxmt\") on node \"crc\" DevicePath \"\"" Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.439655 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1fd882ef-1f78-4e52-bc06-24be6766fb1f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.803801 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5h9mm" event={"ID":"1fd882ef-1f78-4e52-bc06-24be6766fb1f","Type":"ContainerDied","Data":"bbd9e4a2142dc0a16151c6d4689bbb79881d80ca568b9d5c64ff93284168615d"} Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.803927 4910 scope.go:117] "RemoveContainer" containerID="5e687ab21379e04c3458ef6700953c03e89aa97a268d5d0221f372b112e00c26" Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.803961 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5h9mm" Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.835029 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5h9mm"] Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.841885 4910 scope.go:117] "RemoveContainer" containerID="f4c118069fe70af815499316d2c36909cfe6313e788b3f52cc72b11a5b1869f5" Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.846968 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5h9mm"] Jan 05 23:20:44 crc kubenswrapper[4910]: I0105 23:20:44.871910 4910 scope.go:117] "RemoveContainer" containerID="7f68e87bc32cd90459bb982917b8e1074bb78158067c0fd7ccdfe3bc6e34c928" Jan 05 23:20:46 crc kubenswrapper[4910]: I0105 23:20:46.739896 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fd882ef-1f78-4e52-bc06-24be6766fb1f" path="/var/lib/kubelet/pods/1fd882ef-1f78-4e52-bc06-24be6766fb1f/volumes" Jan 05 23:20:55 crc kubenswrapper[4910]: I0105 23:20:55.722337 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:20:55 crc kubenswrapper[4910]: E0105 23:20:55.723586 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:20:58 crc kubenswrapper[4910]: I0105 23:20:58.088985 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-lrl2t"] Jan 05 23:20:58 crc kubenswrapper[4910]: I0105 23:20:58.095083 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-lrl2t"] Jan 05 23:20:58 crc 
kubenswrapper[4910]: I0105 23:20:58.735154 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1aa172db-0909-4f70-9a54-b85121c67926" path="/var/lib/kubelet/pods/1aa172db-0909-4f70-9a54-b85121c67926/volumes" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.297804 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-snkkh"] Jan 05 23:21:03 crc kubenswrapper[4910]: E0105 23:21:03.299646 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8835948c-7425-4b43-abcd-c152feb46c72" containerName="registry-server" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.299673 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8835948c-7425-4b43-abcd-c152feb46c72" containerName="registry-server" Jan 05 23:21:03 crc kubenswrapper[4910]: E0105 23:21:03.299701 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fd882ef-1f78-4e52-bc06-24be6766fb1f" containerName="extract-content" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.299713 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fd882ef-1f78-4e52-bc06-24be6766fb1f" containerName="extract-content" Jan 05 23:21:03 crc kubenswrapper[4910]: E0105 23:21:03.299741 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8835948c-7425-4b43-abcd-c152feb46c72" containerName="extract-content" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.299752 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8835948c-7425-4b43-abcd-c152feb46c72" containerName="extract-content" Jan 05 23:21:03 crc kubenswrapper[4910]: E0105 23:21:03.299767 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8835948c-7425-4b43-abcd-c152feb46c72" containerName="extract-utilities" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.299778 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8835948c-7425-4b43-abcd-c152feb46c72" containerName="extract-utilities" Jan 05 23:21:03 crc kubenswrapper[4910]: E0105 23:21:03.299795 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fd882ef-1f78-4e52-bc06-24be6766fb1f" containerName="extract-utilities" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.299806 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fd882ef-1f78-4e52-bc06-24be6766fb1f" containerName="extract-utilities" Jan 05 23:21:03 crc kubenswrapper[4910]: E0105 23:21:03.299851 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fd882ef-1f78-4e52-bc06-24be6766fb1f" containerName="registry-server" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.299864 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fd882ef-1f78-4e52-bc06-24be6766fb1f" containerName="registry-server" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.300088 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8835948c-7425-4b43-abcd-c152feb46c72" containerName="registry-server" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.300106 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fd882ef-1f78-4e52-bc06-24be6766fb1f" containerName="registry-server" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.301020 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-snkkh" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.315287 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-snkkh"] Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.387654 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-6a14-account-create-update-4g5tk"] Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.389550 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-6a14-account-create-update-4g5tk" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.391652 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.398990 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-6a14-account-create-update-4g5tk"] Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.474657 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46eee413-9f34-4931-b5f7-b6af4afcaa76-operator-scripts\") pod \"barbican-db-create-snkkh\" (UID: \"46eee413-9f34-4931-b5f7-b6af4afcaa76\") " pod="openstack/barbican-db-create-snkkh" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.474848 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g75hw\" (UniqueName: \"kubernetes.io/projected/46eee413-9f34-4931-b5f7-b6af4afcaa76-kube-api-access-g75hw\") pod \"barbican-db-create-snkkh\" (UID: \"46eee413-9f34-4931-b5f7-b6af4afcaa76\") " pod="openstack/barbican-db-create-snkkh" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.577195 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-db66d\" (UniqueName: \"kubernetes.io/projected/d0e6be1b-a676-49ef-93eb-90332cbaed03-kube-api-access-db66d\") pod \"barbican-6a14-account-create-update-4g5tk\" (UID: \"d0e6be1b-a676-49ef-93eb-90332cbaed03\") " pod="openstack/barbican-6a14-account-create-update-4g5tk" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.577308 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g75hw\" (UniqueName: \"kubernetes.io/projected/46eee413-9f34-4931-b5f7-b6af4afcaa76-kube-api-access-g75hw\") pod \"barbican-db-create-snkkh\" (UID: \"46eee413-9f34-4931-b5f7-b6af4afcaa76\") " pod="openstack/barbican-db-create-snkkh" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.577485 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0e6be1b-a676-49ef-93eb-90332cbaed03-operator-scripts\") pod \"barbican-6a14-account-create-update-4g5tk\" (UID: \"d0e6be1b-a676-49ef-93eb-90332cbaed03\") " pod="openstack/barbican-6a14-account-create-update-4g5tk" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.577576 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46eee413-9f34-4931-b5f7-b6af4afcaa76-operator-scripts\") pod \"barbican-db-create-snkkh\" (UID: \"46eee413-9f34-4931-b5f7-b6af4afcaa76\") " pod="openstack/barbican-db-create-snkkh" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.578571 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46eee413-9f34-4931-b5f7-b6af4afcaa76-operator-scripts\") pod \"barbican-db-create-snkkh\" (UID: \"46eee413-9f34-4931-b5f7-b6af4afcaa76\") " pod="openstack/barbican-db-create-snkkh" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.603577 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g75hw\" (UniqueName: \"kubernetes.io/projected/46eee413-9f34-4931-b5f7-b6af4afcaa76-kube-api-access-g75hw\") pod \"barbican-db-create-snkkh\" (UID: \"46eee413-9f34-4931-b5f7-b6af4afcaa76\") " pod="openstack/barbican-db-create-snkkh" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.625414 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-snkkh" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.709264 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0e6be1b-a676-49ef-93eb-90332cbaed03-operator-scripts\") pod \"barbican-6a14-account-create-update-4g5tk\" (UID: \"d0e6be1b-a676-49ef-93eb-90332cbaed03\") " pod="openstack/barbican-6a14-account-create-update-4g5tk" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.710682 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0e6be1b-a676-49ef-93eb-90332cbaed03-operator-scripts\") pod \"barbican-6a14-account-create-update-4g5tk\" (UID: \"d0e6be1b-a676-49ef-93eb-90332cbaed03\") " pod="openstack/barbican-6a14-account-create-update-4g5tk" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.715808 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-db66d\" (UniqueName: \"kubernetes.io/projected/d0e6be1b-a676-49ef-93eb-90332cbaed03-kube-api-access-db66d\") pod \"barbican-6a14-account-create-update-4g5tk\" (UID: \"d0e6be1b-a676-49ef-93eb-90332cbaed03\") " pod="openstack/barbican-6a14-account-create-update-4g5tk" Jan 05 23:21:03 crc kubenswrapper[4910]: I0105 23:21:03.748023 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-db66d\" (UniqueName: \"kubernetes.io/projected/d0e6be1b-a676-49ef-93eb-90332cbaed03-kube-api-access-db66d\") pod \"barbican-6a14-account-create-update-4g5tk\" (UID: \"d0e6be1b-a676-49ef-93eb-90332cbaed03\") " pod="openstack/barbican-6a14-account-create-update-4g5tk" Jan 05 23:21:04 crc kubenswrapper[4910]: I0105 23:21:04.010773 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-6a14-account-create-update-4g5tk" Jan 05 23:21:04 crc kubenswrapper[4910]: I0105 23:21:04.124394 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-snkkh"] Jan 05 23:21:04 crc kubenswrapper[4910]: I0105 23:21:04.271713 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-6a14-account-create-update-4g5tk"] Jan 05 23:21:04 crc kubenswrapper[4910]: W0105 23:21:04.278309 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0e6be1b_a676_49ef_93eb_90332cbaed03.slice/crio-3fc31098a1ffb3ec7eaf24e201adab2bb9bb3062e7e4430ad6dd17179524f544 WatchSource:0}: Error finding container 3fc31098a1ffb3ec7eaf24e201adab2bb9bb3062e7e4430ad6dd17179524f544: Status 404 returned error can't find the container with id 3fc31098a1ffb3ec7eaf24e201adab2bb9bb3062e7e4430ad6dd17179524f544 Jan 05 23:21:05 crc kubenswrapper[4910]: I0105 23:21:05.016921 4910 generic.go:334] "Generic (PLEG): container finished" podID="46eee413-9f34-4931-b5f7-b6af4afcaa76" containerID="be74830cf4fb9fca766fe680f75626aafaa6e4d44c127d4e316b2cdebcf45b78" exitCode=0 Jan 05 23:21:05 crc kubenswrapper[4910]: I0105 23:21:05.016996 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-snkkh" event={"ID":"46eee413-9f34-4931-b5f7-b6af4afcaa76","Type":"ContainerDied","Data":"be74830cf4fb9fca766fe680f75626aafaa6e4d44c127d4e316b2cdebcf45b78"} Jan 05 23:21:05 crc kubenswrapper[4910]: I0105 23:21:05.017550 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-snkkh" event={"ID":"46eee413-9f34-4931-b5f7-b6af4afcaa76","Type":"ContainerStarted","Data":"23f507057c6b5ff0e78567604a9319444ccde01f20b06ceb1a1d0148fd7b174b"} Jan 05 23:21:05 crc kubenswrapper[4910]: I0105 23:21:05.025314 4910 generic.go:334] "Generic (PLEG): container finished" podID="d0e6be1b-a676-49ef-93eb-90332cbaed03" containerID="e94082365609cceb04e6d9f40175c7d32943fefe7a1c465491770fc03ad68fb0" exitCode=0 Jan 05 23:21:05 crc kubenswrapper[4910]: I0105 23:21:05.025400 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6a14-account-create-update-4g5tk" event={"ID":"d0e6be1b-a676-49ef-93eb-90332cbaed03","Type":"ContainerDied","Data":"e94082365609cceb04e6d9f40175c7d32943fefe7a1c465491770fc03ad68fb0"} Jan 05 23:21:05 crc kubenswrapper[4910]: I0105 23:21:05.025487 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6a14-account-create-update-4g5tk" event={"ID":"d0e6be1b-a676-49ef-93eb-90332cbaed03","Type":"ContainerStarted","Data":"3fc31098a1ffb3ec7eaf24e201adab2bb9bb3062e7e4430ad6dd17179524f544"} Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.510205 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-snkkh" Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.516511 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-6a14-account-create-update-4g5tk" Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.581923 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-db66d\" (UniqueName: \"kubernetes.io/projected/d0e6be1b-a676-49ef-93eb-90332cbaed03-kube-api-access-db66d\") pod \"d0e6be1b-a676-49ef-93eb-90332cbaed03\" (UID: \"d0e6be1b-a676-49ef-93eb-90332cbaed03\") " Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.581994 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g75hw\" (UniqueName: \"kubernetes.io/projected/46eee413-9f34-4931-b5f7-b6af4afcaa76-kube-api-access-g75hw\") pod \"46eee413-9f34-4931-b5f7-b6af4afcaa76\" (UID: \"46eee413-9f34-4931-b5f7-b6af4afcaa76\") " Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.582057 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0e6be1b-a676-49ef-93eb-90332cbaed03-operator-scripts\") pod \"d0e6be1b-a676-49ef-93eb-90332cbaed03\" (UID: \"d0e6be1b-a676-49ef-93eb-90332cbaed03\") " Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.582137 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46eee413-9f34-4931-b5f7-b6af4afcaa76-operator-scripts\") pod \"46eee413-9f34-4931-b5f7-b6af4afcaa76\" (UID: \"46eee413-9f34-4931-b5f7-b6af4afcaa76\") " Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.583008 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0e6be1b-a676-49ef-93eb-90332cbaed03-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d0e6be1b-a676-49ef-93eb-90332cbaed03" (UID: "d0e6be1b-a676-49ef-93eb-90332cbaed03"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.583112 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46eee413-9f34-4931-b5f7-b6af4afcaa76-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "46eee413-9f34-4931-b5f7-b6af4afcaa76" (UID: "46eee413-9f34-4931-b5f7-b6af4afcaa76"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.589544 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0e6be1b-a676-49ef-93eb-90332cbaed03-kube-api-access-db66d" (OuterVolumeSpecName: "kube-api-access-db66d") pod "d0e6be1b-a676-49ef-93eb-90332cbaed03" (UID: "d0e6be1b-a676-49ef-93eb-90332cbaed03"). InnerVolumeSpecName "kube-api-access-db66d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.589624 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46eee413-9f34-4931-b5f7-b6af4afcaa76-kube-api-access-g75hw" (OuterVolumeSpecName: "kube-api-access-g75hw") pod "46eee413-9f34-4931-b5f7-b6af4afcaa76" (UID: "46eee413-9f34-4931-b5f7-b6af4afcaa76"). InnerVolumeSpecName "kube-api-access-g75hw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.683669 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0e6be1b-a676-49ef-93eb-90332cbaed03-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.683938 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46eee413-9f34-4931-b5f7-b6af4afcaa76-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.684064 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-db66d\" (UniqueName: \"kubernetes.io/projected/d0e6be1b-a676-49ef-93eb-90332cbaed03-kube-api-access-db66d\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:06 crc kubenswrapper[4910]: I0105 23:21:06.684220 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g75hw\" (UniqueName: \"kubernetes.io/projected/46eee413-9f34-4931-b5f7-b6af4afcaa76-kube-api-access-g75hw\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:07 crc kubenswrapper[4910]: I0105 23:21:07.048468 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-snkkh" event={"ID":"46eee413-9f34-4931-b5f7-b6af4afcaa76","Type":"ContainerDied","Data":"23f507057c6b5ff0e78567604a9319444ccde01f20b06ceb1a1d0148fd7b174b"} Jan 05 23:21:07 crc kubenswrapper[4910]: I0105 23:21:07.048520 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23f507057c6b5ff0e78567604a9319444ccde01f20b06ceb1a1d0148fd7b174b" Jan 05 23:21:07 crc kubenswrapper[4910]: I0105 23:21:07.048561 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-snkkh" Jan 05 23:21:07 crc kubenswrapper[4910]: I0105 23:21:07.051313 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6a14-account-create-update-4g5tk" event={"ID":"d0e6be1b-a676-49ef-93eb-90332cbaed03","Type":"ContainerDied","Data":"3fc31098a1ffb3ec7eaf24e201adab2bb9bb3062e7e4430ad6dd17179524f544"} Jan 05 23:21:07 crc kubenswrapper[4910]: I0105 23:21:07.051339 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3fc31098a1ffb3ec7eaf24e201adab2bb9bb3062e7e4430ad6dd17179524f544" Jan 05 23:21:07 crc kubenswrapper[4910]: I0105 23:21:07.051492 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-6a14-account-create-update-4g5tk" Jan 05 23:21:07 crc kubenswrapper[4910]: I0105 23:21:07.722591 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:21:07 crc kubenswrapper[4910]: E0105 23:21:07.724420 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.683071 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-sfck6"] Jan 05 23:21:08 crc kubenswrapper[4910]: E0105 23:21:08.683836 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0e6be1b-a676-49ef-93eb-90332cbaed03" containerName="mariadb-account-create-update" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.683868 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0e6be1b-a676-49ef-93eb-90332cbaed03" containerName="mariadb-account-create-update" Jan 05 23:21:08 crc kubenswrapper[4910]: E0105 23:21:08.683892 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46eee413-9f34-4931-b5f7-b6af4afcaa76" containerName="mariadb-database-create" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.683906 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="46eee413-9f34-4931-b5f7-b6af4afcaa76" containerName="mariadb-database-create" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.684208 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0e6be1b-a676-49ef-93eb-90332cbaed03" containerName="mariadb-account-create-update" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.684264 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="46eee413-9f34-4931-b5f7-b6af4afcaa76" containerName="mariadb-database-create" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.685103 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-sfck6" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.688780 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.688814 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-7xf7g" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.702686 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-sfck6"] Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.836720 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc069c6b-45d0-4bca-a3e2-819cc238c46a-combined-ca-bundle\") pod \"barbican-db-sync-sfck6\" (UID: \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\") " pod="openstack/barbican-db-sync-sfck6" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.837404 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fc069c6b-45d0-4bca-a3e2-819cc238c46a-db-sync-config-data\") pod \"barbican-db-sync-sfck6\" (UID: \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\") " pod="openstack/barbican-db-sync-sfck6" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.838334 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5968x\" (UniqueName: \"kubernetes.io/projected/fc069c6b-45d0-4bca-a3e2-819cc238c46a-kube-api-access-5968x\") pod \"barbican-db-sync-sfck6\" (UID: \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\") " pod="openstack/barbican-db-sync-sfck6" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.939556 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fc069c6b-45d0-4bca-a3e2-819cc238c46a-db-sync-config-data\") pod \"barbican-db-sync-sfck6\" (UID: \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\") " pod="openstack/barbican-db-sync-sfck6" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.939934 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5968x\" (UniqueName: \"kubernetes.io/projected/fc069c6b-45d0-4bca-a3e2-819cc238c46a-kube-api-access-5968x\") pod \"barbican-db-sync-sfck6\" (UID: \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\") " pod="openstack/barbican-db-sync-sfck6" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.940010 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc069c6b-45d0-4bca-a3e2-819cc238c46a-combined-ca-bundle\") pod \"barbican-db-sync-sfck6\" (UID: \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\") " pod="openstack/barbican-db-sync-sfck6" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.944025 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc069c6b-45d0-4bca-a3e2-819cc238c46a-combined-ca-bundle\") pod \"barbican-db-sync-sfck6\" (UID: \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\") " pod="openstack/barbican-db-sync-sfck6" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.953586 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/fc069c6b-45d0-4bca-a3e2-819cc238c46a-db-sync-config-data\") pod \"barbican-db-sync-sfck6\" (UID: \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\") " pod="openstack/barbican-db-sync-sfck6" Jan 05 23:21:08 crc kubenswrapper[4910]: I0105 23:21:08.971195 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5968x\" (UniqueName: \"kubernetes.io/projected/fc069c6b-45d0-4bca-a3e2-819cc238c46a-kube-api-access-5968x\") pod \"barbican-db-sync-sfck6\" (UID: \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\") " pod="openstack/barbican-db-sync-sfck6" Jan 05 23:21:09 crc kubenswrapper[4910]: I0105 23:21:09.009591 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-sfck6" Jan 05 23:21:09 crc kubenswrapper[4910]: I0105 23:21:09.653783 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-sfck6"] Jan 05 23:21:09 crc kubenswrapper[4910]: W0105 23:21:09.662573 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfc069c6b_45d0_4bca_a3e2_819cc238c46a.slice/crio-3b17edbc3ecd0c4a02d2e525b89da8ab285418d5c4ba0aedd9d1e7334996d9bf WatchSource:0}: Error finding container 3b17edbc3ecd0c4a02d2e525b89da8ab285418d5c4ba0aedd9d1e7334996d9bf: Status 404 returned error can't find the container with id 3b17edbc3ecd0c4a02d2e525b89da8ab285418d5c4ba0aedd9d1e7334996d9bf Jan 05 23:21:10 crc kubenswrapper[4910]: I0105 23:21:10.112199 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sfck6" event={"ID":"fc069c6b-45d0-4bca-a3e2-819cc238c46a","Type":"ContainerStarted","Data":"2be6d3b22040bcb1f7353a9c562c48f0162d1e94509e046aaf2ed2f5131b462b"} Jan 05 23:21:10 crc kubenswrapper[4910]: I0105 23:21:10.113824 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sfck6" event={"ID":"fc069c6b-45d0-4bca-a3e2-819cc238c46a","Type":"ContainerStarted","Data":"3b17edbc3ecd0c4a02d2e525b89da8ab285418d5c4ba0aedd9d1e7334996d9bf"} Jan 05 23:21:10 crc kubenswrapper[4910]: I0105 23:21:10.146651 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-sfck6" podStartSLOduration=2.146615093 podStartE2EDuration="2.146615093s" podCreationTimestamp="2026-01-05 23:21:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:21:10.135475077 +0000 UTC m=+5401.712972757" watchObservedRunningTime="2026-01-05 23:21:10.146615093 +0000 UTC m=+5401.724112803" Jan 05 23:21:11 crc kubenswrapper[4910]: I0105 23:21:11.125576 4910 generic.go:334] "Generic (PLEG): container finished" podID="fc069c6b-45d0-4bca-a3e2-819cc238c46a" containerID="2be6d3b22040bcb1f7353a9c562c48f0162d1e94509e046aaf2ed2f5131b462b" exitCode=0 Jan 05 23:21:11 crc kubenswrapper[4910]: I0105 23:21:11.125673 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sfck6" event={"ID":"fc069c6b-45d0-4bca-a3e2-819cc238c46a","Type":"ContainerDied","Data":"2be6d3b22040bcb1f7353a9c562c48f0162d1e94509e046aaf2ed2f5131b462b"} Jan 05 23:21:12 crc kubenswrapper[4910]: I0105 23:21:12.501195 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-sfck6" Jan 05 23:21:12 crc kubenswrapper[4910]: I0105 23:21:12.609104 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5968x\" (UniqueName: \"kubernetes.io/projected/fc069c6b-45d0-4bca-a3e2-819cc238c46a-kube-api-access-5968x\") pod \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\" (UID: \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\") " Jan 05 23:21:12 crc kubenswrapper[4910]: I0105 23:21:12.609175 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fc069c6b-45d0-4bca-a3e2-819cc238c46a-db-sync-config-data\") pod \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\" (UID: \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\") " Jan 05 23:21:12 crc kubenswrapper[4910]: I0105 23:21:12.609208 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc069c6b-45d0-4bca-a3e2-819cc238c46a-combined-ca-bundle\") pod \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\" (UID: \"fc069c6b-45d0-4bca-a3e2-819cc238c46a\") " Jan 05 23:21:12 crc kubenswrapper[4910]: I0105 23:21:12.618670 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc069c6b-45d0-4bca-a3e2-819cc238c46a-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "fc069c6b-45d0-4bca-a3e2-819cc238c46a" (UID: "fc069c6b-45d0-4bca-a3e2-819cc238c46a"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:21:12 crc kubenswrapper[4910]: I0105 23:21:12.629685 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc069c6b-45d0-4bca-a3e2-819cc238c46a-kube-api-access-5968x" (OuterVolumeSpecName: "kube-api-access-5968x") pod "fc069c6b-45d0-4bca-a3e2-819cc238c46a" (UID: "fc069c6b-45d0-4bca-a3e2-819cc238c46a"). InnerVolumeSpecName "kube-api-access-5968x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:21:12 crc kubenswrapper[4910]: I0105 23:21:12.650982 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc069c6b-45d0-4bca-a3e2-819cc238c46a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fc069c6b-45d0-4bca-a3e2-819cc238c46a" (UID: "fc069c6b-45d0-4bca-a3e2-819cc238c46a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:21:12 crc kubenswrapper[4910]: I0105 23:21:12.710710 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5968x\" (UniqueName: \"kubernetes.io/projected/fc069c6b-45d0-4bca-a3e2-819cc238c46a-kube-api-access-5968x\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:12 crc kubenswrapper[4910]: I0105 23:21:12.710774 4910 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fc069c6b-45d0-4bca-a3e2-819cc238c46a-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:12 crc kubenswrapper[4910]: I0105 23:21:12.710785 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc069c6b-45d0-4bca-a3e2-819cc238c46a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.148389 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sfck6" event={"ID":"fc069c6b-45d0-4bca-a3e2-819cc238c46a","Type":"ContainerDied","Data":"3b17edbc3ecd0c4a02d2e525b89da8ab285418d5c4ba0aedd9d1e7334996d9bf"} Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.148454 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b17edbc3ecd0c4a02d2e525b89da8ab285418d5c4ba0aedd9d1e7334996d9bf" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.148516 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-sfck6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.394454 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-574b95949c-7xl5m"] Jan 05 23:21:13 crc kubenswrapper[4910]: E0105 23:21:13.394873 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc069c6b-45d0-4bca-a3e2-819cc238c46a" containerName="barbican-db-sync" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.394892 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc069c6b-45d0-4bca-a3e2-819cc238c46a" containerName="barbican-db-sync" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.395076 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc069c6b-45d0-4bca-a3e2-819cc238c46a" containerName="barbican-db-sync" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.396104 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.406188 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.406527 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.406637 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-7xf7g" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.410504 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-574b95949c-7xl5m"] Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.417823 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-67d484f698-cn4m6"] Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.420376 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.507796 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.568396 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97fb16df-a478-4676-bab1-3eb2033abed6-combined-ca-bundle\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.569015 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t622k\" (UniqueName: \"kubernetes.io/projected/97fb16df-a478-4676-bab1-3eb2033abed6-kube-api-access-t622k\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.569074 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/753baa24-890a-44bb-9e90-78afd9665bfa-config-data\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.569219 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ml9m\" (UniqueName: \"kubernetes.io/projected/753baa24-890a-44bb-9e90-78afd9665bfa-kube-api-access-9ml9m\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.569300 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/753baa24-890a-44bb-9e90-78afd9665bfa-logs\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.569343 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97fb16df-a478-4676-bab1-3eb2033abed6-logs\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.569402 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/753baa24-890a-44bb-9e90-78afd9665bfa-combined-ca-bundle\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.569521 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97fb16df-a478-4676-bab1-3eb2033abed6-config-data\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" 
(UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.569737 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/97fb16df-a478-4676-bab1-3eb2033abed6-config-data-custom\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.569795 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/753baa24-890a-44bb-9e90-78afd9665bfa-config-data-custom\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.599192 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-67d484f698-cn4m6"] Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.670826 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97fb16df-a478-4676-bab1-3eb2033abed6-config-data\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.682373 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/97fb16df-a478-4676-bab1-3eb2033abed6-config-data-custom\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.682463 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/753baa24-890a-44bb-9e90-78afd9665bfa-config-data-custom\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.682535 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97fb16df-a478-4676-bab1-3eb2033abed6-combined-ca-bundle\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.682584 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t622k\" (UniqueName: \"kubernetes.io/projected/97fb16df-a478-4676-bab1-3eb2033abed6-kube-api-access-t622k\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.682650 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/753baa24-890a-44bb-9e90-78afd9665bfa-config-data\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: 
\"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.682753 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ml9m\" (UniqueName: \"kubernetes.io/projected/753baa24-890a-44bb-9e90-78afd9665bfa-kube-api-access-9ml9m\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.682819 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/753baa24-890a-44bb-9e90-78afd9665bfa-logs\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.682860 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97fb16df-a478-4676-bab1-3eb2033abed6-logs\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.682923 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/753baa24-890a-44bb-9e90-78afd9665bfa-combined-ca-bundle\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.702933 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/753baa24-890a-44bb-9e90-78afd9665bfa-logs\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.703282 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97fb16df-a478-4676-bab1-3eb2033abed6-logs\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.706167 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/97fb16df-a478-4676-bab1-3eb2033abed6-config-data-custom\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.713117 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97fb16df-a478-4676-bab1-3eb2033abed6-combined-ca-bundle\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.717744 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/753baa24-890a-44bb-9e90-78afd9665bfa-combined-ca-bundle\") pod 
\"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.723547 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/753baa24-890a-44bb-9e90-78afd9665bfa-config-data\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.724535 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97fb16df-a478-4676-bab1-3eb2033abed6-config-data\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.734696 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/753baa24-890a-44bb-9e90-78afd9665bfa-config-data-custom\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.735537 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t622k\" (UniqueName: \"kubernetes.io/projected/97fb16df-a478-4676-bab1-3eb2033abed6-kube-api-access-t622k\") pod \"barbican-keystone-listener-67d484f698-cn4m6\" (UID: \"97fb16df-a478-4676-bab1-3eb2033abed6\") " pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.804734 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d84fc7bc9-gzscs"] Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.806166 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.810859 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ml9m\" (UniqueName: \"kubernetes.io/projected/753baa24-890a-44bb-9e90-78afd9665bfa-kube-api-access-9ml9m\") pod \"barbican-worker-574b95949c-7xl5m\" (UID: \"753baa24-890a-44bb-9e90-78afd9665bfa\") " pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.815541 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.823492 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d84fc7bc9-gzscs"] Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.937020 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6cdf5d85b6-8zbr5"] Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.938480 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.941797 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.965979 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6cdf5d85b6-8zbr5"] Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.992300 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-ovsdbserver-nb\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.992354 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-dns-svc\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.992398 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-ovsdbserver-sb\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.992425 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-config\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:13 crc kubenswrapper[4910]: I0105 23:21:13.992468 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zq7gw\" (UniqueName: \"kubernetes.io/projected/3ae156b9-e4ff-4442-af05-16a5c951c5e0-kube-api-access-zq7gw\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.095529 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-574b95949c-7xl5m" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.100673 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af4ff390-1ab3-42d8-be1f-126a38d4b313-combined-ca-bundle\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.100833 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zq7gw\" (UniqueName: \"kubernetes.io/projected/3ae156b9-e4ff-4442-af05-16a5c951c5e0-kube-api-access-zq7gw\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.100862 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/af4ff390-1ab3-42d8-be1f-126a38d4b313-config-data-custom\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.100921 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af4ff390-1ab3-42d8-be1f-126a38d4b313-logs\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.100992 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af4ff390-1ab3-42d8-be1f-126a38d4b313-config-data\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.101092 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-ovsdbserver-nb\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.101174 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-dns-svc\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.102686 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-ovsdbserver-nb\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.102958 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-dns-svc\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: 
\"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.103033 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ldz7\" (UniqueName: \"kubernetes.io/projected/af4ff390-1ab3-42d8-be1f-126a38d4b313-kube-api-access-8ldz7\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.103098 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-ovsdbserver-sb\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.103170 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-config\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.108310 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-config\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.108659 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-ovsdbserver-sb\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.140162 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zq7gw\" (UniqueName: \"kubernetes.io/projected/3ae156b9-e4ff-4442-af05-16a5c951c5e0-kube-api-access-zq7gw\") pod \"dnsmasq-dns-6d84fc7bc9-gzscs\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") " pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.212045 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af4ff390-1ab3-42d8-be1f-126a38d4b313-combined-ca-bundle\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.212135 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/af4ff390-1ab3-42d8-be1f-126a38d4b313-config-data-custom\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.212174 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af4ff390-1ab3-42d8-be1f-126a38d4b313-logs\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " 
pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.212210 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af4ff390-1ab3-42d8-be1f-126a38d4b313-config-data\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.212283 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ldz7\" (UniqueName: \"kubernetes.io/projected/af4ff390-1ab3-42d8-be1f-126a38d4b313-kube-api-access-8ldz7\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.216626 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af4ff390-1ab3-42d8-be1f-126a38d4b313-logs\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.216865 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/af4ff390-1ab3-42d8-be1f-126a38d4b313-config-data-custom\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.217450 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af4ff390-1ab3-42d8-be1f-126a38d4b313-combined-ca-bundle\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.218829 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af4ff390-1ab3-42d8-be1f-126a38d4b313-config-data\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.230830 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ldz7\" (UniqueName: \"kubernetes.io/projected/af4ff390-1ab3-42d8-be1f-126a38d4b313-kube-api-access-8ldz7\") pod \"barbican-api-6cdf5d85b6-8zbr5\" (UID: \"af4ff390-1ab3-42d8-be1f-126a38d4b313\") " pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.243777 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.279212 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.481466 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-67d484f698-cn4m6"] Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.598065 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-574b95949c-7xl5m"] Jan 05 23:21:14 crc kubenswrapper[4910]: W0105 23:21:14.610479 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod753baa24_890a_44bb_9e90_78afd9665bfa.slice/crio-4abcbbe1b181972a1d3f2eac493bc72d40f375b541ee3862cf0c21caf6a019ea WatchSource:0}: Error finding container 4abcbbe1b181972a1d3f2eac493bc72d40f375b541ee3862cf0c21caf6a019ea: Status 404 returned error can't find the container with id 4abcbbe1b181972a1d3f2eac493bc72d40f375b541ee3862cf0c21caf6a019ea Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.673667 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6cdf5d85b6-8zbr5"] Jan 05 23:21:14 crc kubenswrapper[4910]: I0105 23:21:14.769220 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d84fc7bc9-gzscs"] Jan 05 23:21:15 crc kubenswrapper[4910]: I0105 23:21:15.170973 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6cdf5d85b6-8zbr5" event={"ID":"af4ff390-1ab3-42d8-be1f-126a38d4b313","Type":"ContainerStarted","Data":"877673ff8b69fca45803e373c493ceedb533aef27545df022191c41b80d86837"} Jan 05 23:21:15 crc kubenswrapper[4910]: I0105 23:21:15.171430 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6cdf5d85b6-8zbr5" event={"ID":"af4ff390-1ab3-42d8-be1f-126a38d4b313","Type":"ContainerStarted","Data":"b7b8f116b2c463ec2ee7f794a78ff68311b9fe287ed07664cb9a61ad09c0393f"} Jan 05 23:21:15 crc kubenswrapper[4910]: I0105 23:21:15.175239 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" event={"ID":"97fb16df-a478-4676-bab1-3eb2033abed6","Type":"ContainerStarted","Data":"47bb578193272818ff42d37a5ea9cbe04c3881946910b02658dc4cd2044728ee"} Jan 05 23:21:15 crc kubenswrapper[4910]: I0105 23:21:15.175333 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" event={"ID":"97fb16df-a478-4676-bab1-3eb2033abed6","Type":"ContainerStarted","Data":"36bdd1e7c9e74a907029b937f7ac0b5097e8a1b48619b4446631ac5359f0e7ff"} Jan 05 23:21:15 crc kubenswrapper[4910]: I0105 23:21:15.175344 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" event={"ID":"97fb16df-a478-4676-bab1-3eb2033abed6","Type":"ContainerStarted","Data":"ab705b4453c5f7004ca45e851a1b277e419a57fda5217ad61067dc02e72187c5"} Jan 05 23:21:15 crc kubenswrapper[4910]: I0105 23:21:15.178565 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-574b95949c-7xl5m" event={"ID":"753baa24-890a-44bb-9e90-78afd9665bfa","Type":"ContainerStarted","Data":"fea4284f7f642c24f34883c0847711e353dc74f3184705e7d8e450cce8f871f7"} Jan 05 23:21:15 crc kubenswrapper[4910]: I0105 23:21:15.178635 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-574b95949c-7xl5m" event={"ID":"753baa24-890a-44bb-9e90-78afd9665bfa","Type":"ContainerStarted","Data":"4abcbbe1b181972a1d3f2eac493bc72d40f375b541ee3862cf0c21caf6a019ea"} Jan 05 
23:21:15 crc kubenswrapper[4910]: I0105 23:21:15.182305 4910 generic.go:334] "Generic (PLEG): container finished" podID="3ae156b9-e4ff-4442-af05-16a5c951c5e0" containerID="f96eb75999b54670ffe1aed60c8a515017be8033a6cd70804269b2d08d2af581" exitCode=0 Jan 05 23:21:15 crc kubenswrapper[4910]: I0105 23:21:15.182341 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" event={"ID":"3ae156b9-e4ff-4442-af05-16a5c951c5e0","Type":"ContainerDied","Data":"f96eb75999b54670ffe1aed60c8a515017be8033a6cd70804269b2d08d2af581"} Jan 05 23:21:15 crc kubenswrapper[4910]: I0105 23:21:15.182364 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" event={"ID":"3ae156b9-e4ff-4442-af05-16a5c951c5e0","Type":"ContainerStarted","Data":"67cc186de59f52e4fb9931620196122ff9bf53dd1596ffaa6fb66a5a4d769808"} Jan 05 23:21:15 crc kubenswrapper[4910]: I0105 23:21:15.201299 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-67d484f698-cn4m6" podStartSLOduration=2.201279525 podStartE2EDuration="2.201279525s" podCreationTimestamp="2026-01-05 23:21:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:21:15.200995738 +0000 UTC m=+5406.778493408" watchObservedRunningTime="2026-01-05 23:21:15.201279525 +0000 UTC m=+5406.778777205" Jan 05 23:21:16 crc kubenswrapper[4910]: I0105 23:21:16.204748 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-574b95949c-7xl5m" event={"ID":"753baa24-890a-44bb-9e90-78afd9665bfa","Type":"ContainerStarted","Data":"2d0809696bfd8967e105aa65773c9fff2cf1d5763da3b2573f12960fdc9f0e05"} Jan 05 23:21:16 crc kubenswrapper[4910]: I0105 23:21:16.212289 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" event={"ID":"3ae156b9-e4ff-4442-af05-16a5c951c5e0","Type":"ContainerStarted","Data":"47caaf867ba63c0742a6dddc5f2ba2efd25c18010801c132c9942b65686f4e9e"} Jan 05 23:21:16 crc kubenswrapper[4910]: I0105 23:21:16.215335 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:16 crc kubenswrapper[4910]: I0105 23:21:16.231540 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6cdf5d85b6-8zbr5" event={"ID":"af4ff390-1ab3-42d8-be1f-126a38d4b313","Type":"ContainerStarted","Data":"eb27260ce95700de2108dbb6522b9ebc53735dc3c58904ed2fb36eb94953c023"} Jan 05 23:21:16 crc kubenswrapper[4910]: I0105 23:21:16.231673 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:16 crc kubenswrapper[4910]: I0105 23:21:16.231698 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:16 crc kubenswrapper[4910]: I0105 23:21:16.235891 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-574b95949c-7xl5m" podStartSLOduration=3.23585962 podStartE2EDuration="3.23585962s" podCreationTimestamp="2026-01-05 23:21:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:21:16.232003785 +0000 UTC m=+5407.809501465" watchObservedRunningTime="2026-01-05 23:21:16.23585962 +0000 UTC m=+5407.813357320" Jan 05 23:21:16 crc 
kubenswrapper[4910]: I0105 23:21:16.265539 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" podStartSLOduration=3.265508654 podStartE2EDuration="3.265508654s" podCreationTimestamp="2026-01-05 23:21:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:21:16.259963177 +0000 UTC m=+5407.837460847" watchObservedRunningTime="2026-01-05 23:21:16.265508654 +0000 UTC m=+5407.843006334" Jan 05 23:21:16 crc kubenswrapper[4910]: I0105 23:21:16.289769 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6cdf5d85b6-8zbr5" podStartSLOduration=3.289739485 podStartE2EDuration="3.289739485s" podCreationTimestamp="2026-01-05 23:21:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:21:16.287075709 +0000 UTC m=+5407.864573379" watchObservedRunningTime="2026-01-05 23:21:16.289739485 +0000 UTC m=+5407.867237155" Jan 05 23:21:20 crc kubenswrapper[4910]: I0105 23:21:20.722826 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:21:20 crc kubenswrapper[4910]: E0105 23:21:20.723954 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:21:20 crc kubenswrapper[4910]: I0105 23:21:20.763304 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:21 crc kubenswrapper[4910]: I0105 23:21:21.929081 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j699f"] Jan 05 23:21:21 crc kubenswrapper[4910]: I0105 23:21:21.931487 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:21 crc kubenswrapper[4910]: I0105 23:21:21.943020 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j699f"] Jan 05 23:21:22 crc kubenswrapper[4910]: I0105 23:21:22.090754 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkz9n\" (UniqueName: \"kubernetes.io/projected/100be155-2cf7-4bdb-9587-b13477dacd7c-kube-api-access-rkz9n\") pod \"redhat-marketplace-j699f\" (UID: \"100be155-2cf7-4bdb-9587-b13477dacd7c\") " pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:22 crc kubenswrapper[4910]: I0105 23:21:22.090977 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/100be155-2cf7-4bdb-9587-b13477dacd7c-utilities\") pod \"redhat-marketplace-j699f\" (UID: \"100be155-2cf7-4bdb-9587-b13477dacd7c\") " pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:22 crc kubenswrapper[4910]: I0105 23:21:22.091044 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/100be155-2cf7-4bdb-9587-b13477dacd7c-catalog-content\") pod \"redhat-marketplace-j699f\" (UID: \"100be155-2cf7-4bdb-9587-b13477dacd7c\") " pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:22 crc kubenswrapper[4910]: I0105 23:21:22.192665 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkz9n\" (UniqueName: \"kubernetes.io/projected/100be155-2cf7-4bdb-9587-b13477dacd7c-kube-api-access-rkz9n\") pod \"redhat-marketplace-j699f\" (UID: \"100be155-2cf7-4bdb-9587-b13477dacd7c\") " pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:22 crc kubenswrapper[4910]: I0105 23:21:22.192800 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/100be155-2cf7-4bdb-9587-b13477dacd7c-utilities\") pod \"redhat-marketplace-j699f\" (UID: \"100be155-2cf7-4bdb-9587-b13477dacd7c\") " pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:22 crc kubenswrapper[4910]: I0105 23:21:22.192846 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/100be155-2cf7-4bdb-9587-b13477dacd7c-catalog-content\") pod \"redhat-marketplace-j699f\" (UID: \"100be155-2cf7-4bdb-9587-b13477dacd7c\") " pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:22 crc kubenswrapper[4910]: I0105 23:21:22.193412 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/100be155-2cf7-4bdb-9587-b13477dacd7c-catalog-content\") pod \"redhat-marketplace-j699f\" (UID: \"100be155-2cf7-4bdb-9587-b13477dacd7c\") " pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:22 crc kubenswrapper[4910]: I0105 23:21:22.193606 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/100be155-2cf7-4bdb-9587-b13477dacd7c-utilities\") pod \"redhat-marketplace-j699f\" (UID: \"100be155-2cf7-4bdb-9587-b13477dacd7c\") " pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:22 crc kubenswrapper[4910]: I0105 23:21:22.217439 4910 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-rkz9n\" (UniqueName: \"kubernetes.io/projected/100be155-2cf7-4bdb-9587-b13477dacd7c-kube-api-access-rkz9n\") pod \"redhat-marketplace-j699f\" (UID: \"100be155-2cf7-4bdb-9587-b13477dacd7c\") " pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:22 crc kubenswrapper[4910]: I0105 23:21:22.280059 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:22 crc kubenswrapper[4910]: I0105 23:21:22.488165 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6cdf5d85b6-8zbr5" Jan 05 23:21:22 crc kubenswrapper[4910]: I0105 23:21:22.832644 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j699f"] Jan 05 23:21:23 crc kubenswrapper[4910]: I0105 23:21:23.289700 4910 generic.go:334] "Generic (PLEG): container finished" podID="100be155-2cf7-4bdb-9587-b13477dacd7c" containerID="ec74be4609a068ef691d85b5e349db1086ab5a43cb88f94238c9d72584915c1c" exitCode=0 Jan 05 23:21:23 crc kubenswrapper[4910]: I0105 23:21:23.290107 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j699f" event={"ID":"100be155-2cf7-4bdb-9587-b13477dacd7c","Type":"ContainerDied","Data":"ec74be4609a068ef691d85b5e349db1086ab5a43cb88f94238c9d72584915c1c"} Jan 05 23:21:23 crc kubenswrapper[4910]: I0105 23:21:23.290182 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j699f" event={"ID":"100be155-2cf7-4bdb-9587-b13477dacd7c","Type":"ContainerStarted","Data":"6ef2e50798dc9268b9eac363937f360d001ea989c249ddfae5ac053354655a94"} Jan 05 23:21:24 crc kubenswrapper[4910]: I0105 23:21:24.246271 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" Jan 05 23:21:24 crc kubenswrapper[4910]: I0105 23:21:24.353447 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cc49cdcf-q9cm4"] Jan 05 23:21:24 crc kubenswrapper[4910]: I0105 23:21:24.353754 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" podUID="1de3ba7c-4d47-4dec-bddb-d6f6da05f871" containerName="dnsmasq-dns" containerID="cri-o://cc1bb5fac787877bf70da3287ece341624bea5fed757fd009beec1e11dc80a79" gracePeriod=10 Jan 05 23:21:24 crc kubenswrapper[4910]: I0105 23:21:24.878843 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.062176 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-config\") pod \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.062591 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hst5\" (UniqueName: \"kubernetes.io/projected/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-kube-api-access-7hst5\") pod \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.062621 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-ovsdbserver-sb\") pod \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.062680 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-dns-svc\") pod \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.062703 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-ovsdbserver-nb\") pod \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\" (UID: \"1de3ba7c-4d47-4dec-bddb-d6f6da05f871\") " Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.083394 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-kube-api-access-7hst5" (OuterVolumeSpecName: "kube-api-access-7hst5") pod "1de3ba7c-4d47-4dec-bddb-d6f6da05f871" (UID: "1de3ba7c-4d47-4dec-bddb-d6f6da05f871"). InnerVolumeSpecName "kube-api-access-7hst5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.137312 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1de3ba7c-4d47-4dec-bddb-d6f6da05f871" (UID: "1de3ba7c-4d47-4dec-bddb-d6f6da05f871"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.151305 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-config" (OuterVolumeSpecName: "config") pod "1de3ba7c-4d47-4dec-bddb-d6f6da05f871" (UID: "1de3ba7c-4d47-4dec-bddb-d6f6da05f871"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.153340 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1de3ba7c-4d47-4dec-bddb-d6f6da05f871" (UID: "1de3ba7c-4d47-4dec-bddb-d6f6da05f871"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.154453 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1de3ba7c-4d47-4dec-bddb-d6f6da05f871" (UID: "1de3ba7c-4d47-4dec-bddb-d6f6da05f871"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.164646 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hst5\" (UniqueName: \"kubernetes.io/projected/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-kube-api-access-7hst5\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.164672 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.164683 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.164693 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.164703 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de3ba7c-4d47-4dec-bddb-d6f6da05f871-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.317701 4910 generic.go:334] "Generic (PLEG): container finished" podID="1de3ba7c-4d47-4dec-bddb-d6f6da05f871" containerID="cc1bb5fac787877bf70da3287ece341624bea5fed757fd009beec1e11dc80a79" exitCode=0 Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.317768 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" event={"ID":"1de3ba7c-4d47-4dec-bddb-d6f6da05f871","Type":"ContainerDied","Data":"cc1bb5fac787877bf70da3287ece341624bea5fed757fd009beec1e11dc80a79"} Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.317799 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" event={"ID":"1de3ba7c-4d47-4dec-bddb-d6f6da05f871","Type":"ContainerDied","Data":"7fb1c418cf94deabd7f886a672d6255444a0d932c814f8b0cc9b5d58d1500c4c"} Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.317819 4910 scope.go:117] "RemoveContainer" containerID="cc1bb5fac787877bf70da3287ece341624bea5fed757fd009beec1e11dc80a79" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.317935 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.332981 4910 generic.go:334] "Generic (PLEG): container finished" podID="100be155-2cf7-4bdb-9587-b13477dacd7c" containerID="a038f4c2442866bf8930d0a404c1f71c28e2103d26f75ca849d4a601be102032" exitCode=0 Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.333062 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j699f" event={"ID":"100be155-2cf7-4bdb-9587-b13477dacd7c","Type":"ContainerDied","Data":"a038f4c2442866bf8930d0a404c1f71c28e2103d26f75ca849d4a601be102032"} Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.357820 4910 scope.go:117] "RemoveContainer" containerID="4d138ace8cf7535bc6bd25cc8054ba26fa479e739f9bac4d6f29589ff4b77714" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.395795 4910 scope.go:117] "RemoveContainer" containerID="cc1bb5fac787877bf70da3287ece341624bea5fed757fd009beec1e11dc80a79" Jan 05 23:21:25 crc kubenswrapper[4910]: E0105 23:21:25.396381 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc1bb5fac787877bf70da3287ece341624bea5fed757fd009beec1e11dc80a79\": container with ID starting with cc1bb5fac787877bf70da3287ece341624bea5fed757fd009beec1e11dc80a79 not found: ID does not exist" containerID="cc1bb5fac787877bf70da3287ece341624bea5fed757fd009beec1e11dc80a79" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.396426 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc1bb5fac787877bf70da3287ece341624bea5fed757fd009beec1e11dc80a79"} err="failed to get container status \"cc1bb5fac787877bf70da3287ece341624bea5fed757fd009beec1e11dc80a79\": rpc error: code = NotFound desc = could not find container \"cc1bb5fac787877bf70da3287ece341624bea5fed757fd009beec1e11dc80a79\": container with ID starting with cc1bb5fac787877bf70da3287ece341624bea5fed757fd009beec1e11dc80a79 not found: ID does not exist" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.396458 4910 scope.go:117] "RemoveContainer" containerID="4d138ace8cf7535bc6bd25cc8054ba26fa479e739f9bac4d6f29589ff4b77714" Jan 05 23:21:25 crc kubenswrapper[4910]: E0105 23:21:25.396823 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d138ace8cf7535bc6bd25cc8054ba26fa479e739f9bac4d6f29589ff4b77714\": container with ID starting with 4d138ace8cf7535bc6bd25cc8054ba26fa479e739f9bac4d6f29589ff4b77714 not found: ID does not exist" containerID="4d138ace8cf7535bc6bd25cc8054ba26fa479e739f9bac4d6f29589ff4b77714" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.396874 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d138ace8cf7535bc6bd25cc8054ba26fa479e739f9bac4d6f29589ff4b77714"} err="failed to get container status \"4d138ace8cf7535bc6bd25cc8054ba26fa479e739f9bac4d6f29589ff4b77714\": rpc error: code = NotFound desc = could not find container \"4d138ace8cf7535bc6bd25cc8054ba26fa479e739f9bac4d6f29589ff4b77714\": container with ID starting with 4d138ace8cf7535bc6bd25cc8054ba26fa479e739f9bac4d6f29589ff4b77714 not found: ID does not exist" Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.402424 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cc49cdcf-q9cm4"] Jan 05 23:21:25 crc kubenswrapper[4910]: I0105 23:21:25.409324 4910 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5cc49cdcf-q9cm4"] Jan 05 23:21:26 crc kubenswrapper[4910]: I0105 23:21:26.345842 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j699f" event={"ID":"100be155-2cf7-4bdb-9587-b13477dacd7c","Type":"ContainerStarted","Data":"a07442c0256f30747c413b6954b15fd2825a0fb828413581ef6de5c857154b44"} Jan 05 23:21:26 crc kubenswrapper[4910]: I0105 23:21:26.365774 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j699f" podStartSLOduration=2.877302672 podStartE2EDuration="5.365755135s" podCreationTimestamp="2026-01-05 23:21:21 +0000 UTC" firstStartedPulling="2026-01-05 23:21:23.294446976 +0000 UTC m=+5414.871944646" lastFinishedPulling="2026-01-05 23:21:25.782899409 +0000 UTC m=+5417.360397109" observedRunningTime="2026-01-05 23:21:26.364440783 +0000 UTC m=+5417.941938503" watchObservedRunningTime="2026-01-05 23:21:26.365755135 +0000 UTC m=+5417.943252825" Jan 05 23:21:26 crc kubenswrapper[4910]: I0105 23:21:26.736153 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1de3ba7c-4d47-4dec-bddb-d6f6da05f871" path="/var/lib/kubelet/pods/1de3ba7c-4d47-4dec-bddb-d6f6da05f871/volumes" Jan 05 23:21:29 crc kubenswrapper[4910]: I0105 23:21:29.754420 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5cc49cdcf-q9cm4" podUID="1de3ba7c-4d47-4dec-bddb-d6f6da05f871" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.22:5353: i/o timeout" Jan 05 23:21:32 crc kubenswrapper[4910]: I0105 23:21:32.281170 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:32 crc kubenswrapper[4910]: I0105 23:21:32.281594 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:32 crc kubenswrapper[4910]: I0105 23:21:32.335163 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:32 crc kubenswrapper[4910]: I0105 23:21:32.475866 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:32 crc kubenswrapper[4910]: I0105 23:21:32.585214 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j699f"] Jan 05 23:21:34 crc kubenswrapper[4910]: I0105 23:21:34.443220 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-j699f" podUID="100be155-2cf7-4bdb-9587-b13477dacd7c" containerName="registry-server" containerID="cri-o://a07442c0256f30747c413b6954b15fd2825a0fb828413581ef6de5c857154b44" gracePeriod=2 Jan 05 23:21:34 crc kubenswrapper[4910]: I0105 23:21:34.954047 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.080642 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/100be155-2cf7-4bdb-9587-b13477dacd7c-utilities\") pod \"100be155-2cf7-4bdb-9587-b13477dacd7c\" (UID: \"100be155-2cf7-4bdb-9587-b13477dacd7c\") " Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.080770 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/100be155-2cf7-4bdb-9587-b13477dacd7c-catalog-content\") pod \"100be155-2cf7-4bdb-9587-b13477dacd7c\" (UID: \"100be155-2cf7-4bdb-9587-b13477dacd7c\") " Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.080982 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkz9n\" (UniqueName: \"kubernetes.io/projected/100be155-2cf7-4bdb-9587-b13477dacd7c-kube-api-access-rkz9n\") pod \"100be155-2cf7-4bdb-9587-b13477dacd7c\" (UID: \"100be155-2cf7-4bdb-9587-b13477dacd7c\") " Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.082219 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/100be155-2cf7-4bdb-9587-b13477dacd7c-utilities" (OuterVolumeSpecName: "utilities") pod "100be155-2cf7-4bdb-9587-b13477dacd7c" (UID: "100be155-2cf7-4bdb-9587-b13477dacd7c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.088598 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/100be155-2cf7-4bdb-9587-b13477dacd7c-kube-api-access-rkz9n" (OuterVolumeSpecName: "kube-api-access-rkz9n") pod "100be155-2cf7-4bdb-9587-b13477dacd7c" (UID: "100be155-2cf7-4bdb-9587-b13477dacd7c"). InnerVolumeSpecName "kube-api-access-rkz9n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.108413 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/100be155-2cf7-4bdb-9587-b13477dacd7c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "100be155-2cf7-4bdb-9587-b13477dacd7c" (UID: "100be155-2cf7-4bdb-9587-b13477dacd7c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.184280 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkz9n\" (UniqueName: \"kubernetes.io/projected/100be155-2cf7-4bdb-9587-b13477dacd7c-kube-api-access-rkz9n\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.184334 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/100be155-2cf7-4bdb-9587-b13477dacd7c-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.184350 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/100be155-2cf7-4bdb-9587-b13477dacd7c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.454882 4910 generic.go:334] "Generic (PLEG): container finished" podID="100be155-2cf7-4bdb-9587-b13477dacd7c" containerID="a07442c0256f30747c413b6954b15fd2825a0fb828413581ef6de5c857154b44" exitCode=0 Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.454955 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j699f" event={"ID":"100be155-2cf7-4bdb-9587-b13477dacd7c","Type":"ContainerDied","Data":"a07442c0256f30747c413b6954b15fd2825a0fb828413581ef6de5c857154b44"} Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.455365 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j699f" event={"ID":"100be155-2cf7-4bdb-9587-b13477dacd7c","Type":"ContainerDied","Data":"6ef2e50798dc9268b9eac363937f360d001ea989c249ddfae5ac053354655a94"} Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.455393 4910 scope.go:117] "RemoveContainer" containerID="a07442c0256f30747c413b6954b15fd2825a0fb828413581ef6de5c857154b44" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.455028 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j699f" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.483432 4910 scope.go:117] "RemoveContainer" containerID="a038f4c2442866bf8930d0a404c1f71c28e2103d26f75ca849d4a601be102032" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.509797 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j699f"] Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.530655 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-j699f"] Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.532765 4910 scope.go:117] "RemoveContainer" containerID="ec74be4609a068ef691d85b5e349db1086ab5a43cb88f94238c9d72584915c1c" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.574165 4910 scope.go:117] "RemoveContainer" containerID="a07442c0256f30747c413b6954b15fd2825a0fb828413581ef6de5c857154b44" Jan 05 23:21:35 crc kubenswrapper[4910]: E0105 23:21:35.574760 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a07442c0256f30747c413b6954b15fd2825a0fb828413581ef6de5c857154b44\": container with ID starting with a07442c0256f30747c413b6954b15fd2825a0fb828413581ef6de5c857154b44 not found: ID does not exist" containerID="a07442c0256f30747c413b6954b15fd2825a0fb828413581ef6de5c857154b44" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.574815 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a07442c0256f30747c413b6954b15fd2825a0fb828413581ef6de5c857154b44"} err="failed to get container status \"a07442c0256f30747c413b6954b15fd2825a0fb828413581ef6de5c857154b44\": rpc error: code = NotFound desc = could not find container \"a07442c0256f30747c413b6954b15fd2825a0fb828413581ef6de5c857154b44\": container with ID starting with a07442c0256f30747c413b6954b15fd2825a0fb828413581ef6de5c857154b44 not found: ID does not exist" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.574851 4910 scope.go:117] "RemoveContainer" containerID="a038f4c2442866bf8930d0a404c1f71c28e2103d26f75ca849d4a601be102032" Jan 05 23:21:35 crc kubenswrapper[4910]: E0105 23:21:35.575707 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a038f4c2442866bf8930d0a404c1f71c28e2103d26f75ca849d4a601be102032\": container with ID starting with a038f4c2442866bf8930d0a404c1f71c28e2103d26f75ca849d4a601be102032 not found: ID does not exist" containerID="a038f4c2442866bf8930d0a404c1f71c28e2103d26f75ca849d4a601be102032" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.575834 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a038f4c2442866bf8930d0a404c1f71c28e2103d26f75ca849d4a601be102032"} err="failed to get container status \"a038f4c2442866bf8930d0a404c1f71c28e2103d26f75ca849d4a601be102032\": rpc error: code = NotFound desc = could not find container \"a038f4c2442866bf8930d0a404c1f71c28e2103d26f75ca849d4a601be102032\": container with ID starting with a038f4c2442866bf8930d0a404c1f71c28e2103d26f75ca849d4a601be102032 not found: ID does not exist" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.575913 4910 scope.go:117] "RemoveContainer" containerID="ec74be4609a068ef691d85b5e349db1086ab5a43cb88f94238c9d72584915c1c" Jan 05 23:21:35 crc kubenswrapper[4910]: E0105 23:21:35.576458 4910 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ec74be4609a068ef691d85b5e349db1086ab5a43cb88f94238c9d72584915c1c\": container with ID starting with ec74be4609a068ef691d85b5e349db1086ab5a43cb88f94238c9d72584915c1c not found: ID does not exist" containerID="ec74be4609a068ef691d85b5e349db1086ab5a43cb88f94238c9d72584915c1c" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.576483 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec74be4609a068ef691d85b5e349db1086ab5a43cb88f94238c9d72584915c1c"} err="failed to get container status \"ec74be4609a068ef691d85b5e349db1086ab5a43cb88f94238c9d72584915c1c\": rpc error: code = NotFound desc = could not find container \"ec74be4609a068ef691d85b5e349db1086ab5a43cb88f94238c9d72584915c1c\": container with ID starting with ec74be4609a068ef691d85b5e349db1086ab5a43cb88f94238c9d72584915c1c not found: ID does not exist" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.722197 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:21:35 crc kubenswrapper[4910]: E0105 23:21:35.722772 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.919554 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-km68x"] Jan 05 23:21:35 crc kubenswrapper[4910]: E0105 23:21:35.919867 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1de3ba7c-4d47-4dec-bddb-d6f6da05f871" containerName="init" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.919879 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1de3ba7c-4d47-4dec-bddb-d6f6da05f871" containerName="init" Jan 05 23:21:35 crc kubenswrapper[4910]: E0105 23:21:35.919894 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="100be155-2cf7-4bdb-9587-b13477dacd7c" containerName="registry-server" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.919901 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="100be155-2cf7-4bdb-9587-b13477dacd7c" containerName="registry-server" Jan 05 23:21:35 crc kubenswrapper[4910]: E0105 23:21:35.919918 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="100be155-2cf7-4bdb-9587-b13477dacd7c" containerName="extract-utilities" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.919925 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="100be155-2cf7-4bdb-9587-b13477dacd7c" containerName="extract-utilities" Jan 05 23:21:35 crc kubenswrapper[4910]: E0105 23:21:35.919935 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1de3ba7c-4d47-4dec-bddb-d6f6da05f871" containerName="dnsmasq-dns" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.919941 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1de3ba7c-4d47-4dec-bddb-d6f6da05f871" containerName="dnsmasq-dns" Jan 05 23:21:35 crc kubenswrapper[4910]: E0105 23:21:35.919955 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="100be155-2cf7-4bdb-9587-b13477dacd7c" containerName="extract-content" Jan 05 23:21:35 
crc kubenswrapper[4910]: I0105 23:21:35.919962 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="100be155-2cf7-4bdb-9587-b13477dacd7c" containerName="extract-content" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.920134 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1de3ba7c-4d47-4dec-bddb-d6f6da05f871" containerName="dnsmasq-dns" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.920161 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="100be155-2cf7-4bdb-9587-b13477dacd7c" containerName="registry-server" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.920685 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-km68x" Jan 05 23:21:35 crc kubenswrapper[4910]: I0105 23:21:35.933445 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-km68x"] Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.007370 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/079de26f-9048-4f5e-b884-732587f9606b-operator-scripts\") pod \"neutron-db-create-km68x\" (UID: \"079de26f-9048-4f5e-b884-732587f9606b\") " pod="openstack/neutron-db-create-km68x" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.007892 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvnwx\" (UniqueName: \"kubernetes.io/projected/079de26f-9048-4f5e-b884-732587f9606b-kube-api-access-jvnwx\") pod \"neutron-db-create-km68x\" (UID: \"079de26f-9048-4f5e-b884-732587f9606b\") " pod="openstack/neutron-db-create-km68x" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.037245 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8db8-account-create-update-8zh22"] Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.038287 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8db8-account-create-update-8zh22" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.040266 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.056429 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8db8-account-create-update-8zh22"] Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.109270 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkgkp\" (UniqueName: \"kubernetes.io/projected/1fc15311-6c7f-44f9-81b3-497c39223359-kube-api-access-dkgkp\") pod \"neutron-8db8-account-create-update-8zh22\" (UID: \"1fc15311-6c7f-44f9-81b3-497c39223359\") " pod="openstack/neutron-8db8-account-create-update-8zh22" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.109352 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/079de26f-9048-4f5e-b884-732587f9606b-operator-scripts\") pod \"neutron-db-create-km68x\" (UID: \"079de26f-9048-4f5e-b884-732587f9606b\") " pod="openstack/neutron-db-create-km68x" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.109549 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvnwx\" (UniqueName: \"kubernetes.io/projected/079de26f-9048-4f5e-b884-732587f9606b-kube-api-access-jvnwx\") pod \"neutron-db-create-km68x\" (UID: \"079de26f-9048-4f5e-b884-732587f9606b\") " pod="openstack/neutron-db-create-km68x" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.109685 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fc15311-6c7f-44f9-81b3-497c39223359-operator-scripts\") pod \"neutron-8db8-account-create-update-8zh22\" (UID: \"1fc15311-6c7f-44f9-81b3-497c39223359\") " pod="openstack/neutron-8db8-account-create-update-8zh22" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.110174 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/079de26f-9048-4f5e-b884-732587f9606b-operator-scripts\") pod \"neutron-db-create-km68x\" (UID: \"079de26f-9048-4f5e-b884-732587f9606b\") " pod="openstack/neutron-db-create-km68x" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.135879 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvnwx\" (UniqueName: \"kubernetes.io/projected/079de26f-9048-4f5e-b884-732587f9606b-kube-api-access-jvnwx\") pod \"neutron-db-create-km68x\" (UID: \"079de26f-9048-4f5e-b884-732587f9606b\") " pod="openstack/neutron-db-create-km68x" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.211832 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fc15311-6c7f-44f9-81b3-497c39223359-operator-scripts\") pod \"neutron-8db8-account-create-update-8zh22\" (UID: \"1fc15311-6c7f-44f9-81b3-497c39223359\") " pod="openstack/neutron-8db8-account-create-update-8zh22" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.212420 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkgkp\" (UniqueName: \"kubernetes.io/projected/1fc15311-6c7f-44f9-81b3-497c39223359-kube-api-access-dkgkp\") pod 
\"neutron-8db8-account-create-update-8zh22\" (UID: \"1fc15311-6c7f-44f9-81b3-497c39223359\") " pod="openstack/neutron-8db8-account-create-update-8zh22" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.213206 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fc15311-6c7f-44f9-81b3-497c39223359-operator-scripts\") pod \"neutron-8db8-account-create-update-8zh22\" (UID: \"1fc15311-6c7f-44f9-81b3-497c39223359\") " pod="openstack/neutron-8db8-account-create-update-8zh22" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.232187 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkgkp\" (UniqueName: \"kubernetes.io/projected/1fc15311-6c7f-44f9-81b3-497c39223359-kube-api-access-dkgkp\") pod \"neutron-8db8-account-create-update-8zh22\" (UID: \"1fc15311-6c7f-44f9-81b3-497c39223359\") " pod="openstack/neutron-8db8-account-create-update-8zh22" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.281813 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-km68x" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.353399 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8db8-account-create-update-8zh22" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.743658 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="100be155-2cf7-4bdb-9587-b13477dacd7c" path="/var/lib/kubelet/pods/100be155-2cf7-4bdb-9587-b13477dacd7c/volumes" Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.809204 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-km68x"] Jan 05 23:21:36 crc kubenswrapper[4910]: I0105 23:21:36.934972 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8db8-account-create-update-8zh22"] Jan 05 23:21:37 crc kubenswrapper[4910]: I0105 23:21:37.497476 4910 generic.go:334] "Generic (PLEG): container finished" podID="1fc15311-6c7f-44f9-81b3-497c39223359" containerID="ad6b234209b0f06705f68322ef63cc28301787c506756148cfb089ccd9a68992" exitCode=0 Jan 05 23:21:37 crc kubenswrapper[4910]: I0105 23:21:37.497625 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8db8-account-create-update-8zh22" event={"ID":"1fc15311-6c7f-44f9-81b3-497c39223359","Type":"ContainerDied","Data":"ad6b234209b0f06705f68322ef63cc28301787c506756148cfb089ccd9a68992"} Jan 05 23:21:37 crc kubenswrapper[4910]: I0105 23:21:37.498099 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8db8-account-create-update-8zh22" event={"ID":"1fc15311-6c7f-44f9-81b3-497c39223359","Type":"ContainerStarted","Data":"855f7c44886757c0dd74e2be7ac89bb6f5284b8afa8ca3a08420957dfc1bc4b2"} Jan 05 23:21:37 crc kubenswrapper[4910]: I0105 23:21:37.500428 4910 generic.go:334] "Generic (PLEG): container finished" podID="079de26f-9048-4f5e-b884-732587f9606b" containerID="988d6ed10f626896fbfd1e282cfe271e921f7766df18c0e8a74e3fb9d05c2af2" exitCode=0 Jan 05 23:21:37 crc kubenswrapper[4910]: I0105 23:21:37.500563 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-km68x" event={"ID":"079de26f-9048-4f5e-b884-732587f9606b","Type":"ContainerDied","Data":"988d6ed10f626896fbfd1e282cfe271e921f7766df18c0e8a74e3fb9d05c2af2"} Jan 05 23:21:37 crc kubenswrapper[4910]: I0105 23:21:37.500618 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-db-create-km68x" event={"ID":"079de26f-9048-4f5e-b884-732587f9606b","Type":"ContainerStarted","Data":"48edee2247fe2c4c4d77cd20104b1370a8e2c9d9f6fdf7dbaedabcbcb71cd6d5"} Jan 05 23:21:38 crc kubenswrapper[4910]: I0105 23:21:38.966874 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8db8-account-create-update-8zh22" Jan 05 23:21:38 crc kubenswrapper[4910]: I0105 23:21:38.970076 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-km68x" Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.087065 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fc15311-6c7f-44f9-81b3-497c39223359-operator-scripts\") pod \"1fc15311-6c7f-44f9-81b3-497c39223359\" (UID: \"1fc15311-6c7f-44f9-81b3-497c39223359\") " Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.087199 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dkgkp\" (UniqueName: \"kubernetes.io/projected/1fc15311-6c7f-44f9-81b3-497c39223359-kube-api-access-dkgkp\") pod \"1fc15311-6c7f-44f9-81b3-497c39223359\" (UID: \"1fc15311-6c7f-44f9-81b3-497c39223359\") " Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.087248 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/079de26f-9048-4f5e-b884-732587f9606b-operator-scripts\") pod \"079de26f-9048-4f5e-b884-732587f9606b\" (UID: \"079de26f-9048-4f5e-b884-732587f9606b\") " Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.087285 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvnwx\" (UniqueName: \"kubernetes.io/projected/079de26f-9048-4f5e-b884-732587f9606b-kube-api-access-jvnwx\") pod \"079de26f-9048-4f5e-b884-732587f9606b\" (UID: \"079de26f-9048-4f5e-b884-732587f9606b\") " Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.088227 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1fc15311-6c7f-44f9-81b3-497c39223359-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1fc15311-6c7f-44f9-81b3-497c39223359" (UID: "1fc15311-6c7f-44f9-81b3-497c39223359"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.088260 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/079de26f-9048-4f5e-b884-732587f9606b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "079de26f-9048-4f5e-b884-732587f9606b" (UID: "079de26f-9048-4f5e-b884-732587f9606b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.095226 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/079de26f-9048-4f5e-b884-732587f9606b-kube-api-access-jvnwx" (OuterVolumeSpecName: "kube-api-access-jvnwx") pod "079de26f-9048-4f5e-b884-732587f9606b" (UID: "079de26f-9048-4f5e-b884-732587f9606b"). InnerVolumeSpecName "kube-api-access-jvnwx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.096214 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fc15311-6c7f-44f9-81b3-497c39223359-kube-api-access-dkgkp" (OuterVolumeSpecName: "kube-api-access-dkgkp") pod "1fc15311-6c7f-44f9-81b3-497c39223359" (UID: "1fc15311-6c7f-44f9-81b3-497c39223359"). InnerVolumeSpecName "kube-api-access-dkgkp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.189537 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fc15311-6c7f-44f9-81b3-497c39223359-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.189607 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dkgkp\" (UniqueName: \"kubernetes.io/projected/1fc15311-6c7f-44f9-81b3-497c39223359-kube-api-access-dkgkp\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.189627 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/079de26f-9048-4f5e-b884-732587f9606b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.189644 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvnwx\" (UniqueName: \"kubernetes.io/projected/079de26f-9048-4f5e-b884-732587f9606b-kube-api-access-jvnwx\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.540187 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-km68x" Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.540209 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-km68x" event={"ID":"079de26f-9048-4f5e-b884-732587f9606b","Type":"ContainerDied","Data":"48edee2247fe2c4c4d77cd20104b1370a8e2c9d9f6fdf7dbaedabcbcb71cd6d5"} Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.540936 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="48edee2247fe2c4c4d77cd20104b1370a8e2c9d9f6fdf7dbaedabcbcb71cd6d5" Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.545062 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8db8-account-create-update-8zh22" event={"ID":"1fc15311-6c7f-44f9-81b3-497c39223359","Type":"ContainerDied","Data":"855f7c44886757c0dd74e2be7ac89bb6f5284b8afa8ca3a08420957dfc1bc4b2"} Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.545098 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="855f7c44886757c0dd74e2be7ac89bb6f5284b8afa8ca3a08420957dfc1bc4b2" Jan 05 23:21:39 crc kubenswrapper[4910]: I0105 23:21:39.545167 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8db8-account-create-update-8zh22" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.381502 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-qm7fx"] Jan 05 23:21:41 crc kubenswrapper[4910]: E0105 23:21:41.382434 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fc15311-6c7f-44f9-81b3-497c39223359" containerName="mariadb-account-create-update" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.382450 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fc15311-6c7f-44f9-81b3-497c39223359" containerName="mariadb-account-create-update" Jan 05 23:21:41 crc kubenswrapper[4910]: E0105 23:21:41.382464 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="079de26f-9048-4f5e-b884-732587f9606b" containerName="mariadb-database-create" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.382472 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="079de26f-9048-4f5e-b884-732587f9606b" containerName="mariadb-database-create" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.382654 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="079de26f-9048-4f5e-b884-732587f9606b" containerName="mariadb-database-create" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.382668 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fc15311-6c7f-44f9-81b3-497c39223359" containerName="mariadb-account-create-update" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.383345 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-qm7fx" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.387844 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.388098 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.388248 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-hprgw" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.409923 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-qm7fx"] Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.431494 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5b3d03b7-e9b1-4577-a181-f103c4121ecf-config\") pod \"neutron-db-sync-qm7fx\" (UID: \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\") " pod="openstack/neutron-db-sync-qm7fx" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.431579 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-797v5\" (UniqueName: \"kubernetes.io/projected/5b3d03b7-e9b1-4577-a181-f103c4121ecf-kube-api-access-797v5\") pod \"neutron-db-sync-qm7fx\" (UID: \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\") " pod="openstack/neutron-db-sync-qm7fx" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.431635 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b3d03b7-e9b1-4577-a181-f103c4121ecf-combined-ca-bundle\") pod \"neutron-db-sync-qm7fx\" (UID: \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\") " pod="openstack/neutron-db-sync-qm7fx" Jan 05 23:21:41 crc 
kubenswrapper[4910]: I0105 23:21:41.533454 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b3d03b7-e9b1-4577-a181-f103c4121ecf-combined-ca-bundle\") pod \"neutron-db-sync-qm7fx\" (UID: \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\") " pod="openstack/neutron-db-sync-qm7fx" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.533917 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5b3d03b7-e9b1-4577-a181-f103c4121ecf-config\") pod \"neutron-db-sync-qm7fx\" (UID: \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\") " pod="openstack/neutron-db-sync-qm7fx" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.534081 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-797v5\" (UniqueName: \"kubernetes.io/projected/5b3d03b7-e9b1-4577-a181-f103c4121ecf-kube-api-access-797v5\") pod \"neutron-db-sync-qm7fx\" (UID: \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\") " pod="openstack/neutron-db-sync-qm7fx" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.540631 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/5b3d03b7-e9b1-4577-a181-f103c4121ecf-config\") pod \"neutron-db-sync-qm7fx\" (UID: \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\") " pod="openstack/neutron-db-sync-qm7fx" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.543794 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b3d03b7-e9b1-4577-a181-f103c4121ecf-combined-ca-bundle\") pod \"neutron-db-sync-qm7fx\" (UID: \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\") " pod="openstack/neutron-db-sync-qm7fx" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.552705 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-797v5\" (UniqueName: \"kubernetes.io/projected/5b3d03b7-e9b1-4577-a181-f103c4121ecf-kube-api-access-797v5\") pod \"neutron-db-sync-qm7fx\" (UID: \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\") " pod="openstack/neutron-db-sync-qm7fx" Jan 05 23:21:41 crc kubenswrapper[4910]: I0105 23:21:41.708673 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-qm7fx" Jan 05 23:21:42 crc kubenswrapper[4910]: I0105 23:21:42.222781 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-qm7fx"] Jan 05 23:21:42 crc kubenswrapper[4910]: I0105 23:21:42.580300 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-qm7fx" event={"ID":"5b3d03b7-e9b1-4577-a181-f103c4121ecf","Type":"ContainerStarted","Data":"5e0732739ff1284f5f8e1dd3ae708133804fcfe53b1dfecb1053fed6370e748c"} Jan 05 23:21:42 crc kubenswrapper[4910]: I0105 23:21:42.581612 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-qm7fx" event={"ID":"5b3d03b7-e9b1-4577-a181-f103c4121ecf","Type":"ContainerStarted","Data":"e78c7face5517eae83cfc2ec85384b10c1565f23b570b0f5e60eb73723461f31"} Jan 05 23:21:42 crc kubenswrapper[4910]: I0105 23:21:42.609155 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-qm7fx" podStartSLOduration=1.609097107 podStartE2EDuration="1.609097107s" podCreationTimestamp="2026-01-05 23:21:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:21:42.601020387 +0000 UTC m=+5434.178518057" watchObservedRunningTime="2026-01-05 23:21:42.609097107 +0000 UTC m=+5434.186594787" Jan 05 23:21:45 crc kubenswrapper[4910]: I0105 23:21:45.285894 4910 scope.go:117] "RemoveContainer" containerID="1a5728d0c415528680c8cbfb04df75de89d4c288ef34d4ea5499256e5e827057" Jan 05 23:21:47 crc kubenswrapper[4910]: I0105 23:21:47.651296 4910 generic.go:334] "Generic (PLEG): container finished" podID="5b3d03b7-e9b1-4577-a181-f103c4121ecf" containerID="5e0732739ff1284f5f8e1dd3ae708133804fcfe53b1dfecb1053fed6370e748c" exitCode=0 Jan 05 23:21:47 crc kubenswrapper[4910]: I0105 23:21:47.651393 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-qm7fx" event={"ID":"5b3d03b7-e9b1-4577-a181-f103c4121ecf","Type":"ContainerDied","Data":"5e0732739ff1284f5f8e1dd3ae708133804fcfe53b1dfecb1053fed6370e748c"} Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.114936 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-qm7fx" Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.281070 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-797v5\" (UniqueName: \"kubernetes.io/projected/5b3d03b7-e9b1-4577-a181-f103c4121ecf-kube-api-access-797v5\") pod \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\" (UID: \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\") " Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.281306 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b3d03b7-e9b1-4577-a181-f103c4121ecf-combined-ca-bundle\") pod \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\" (UID: \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\") " Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.281553 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5b3d03b7-e9b1-4577-a181-f103c4121ecf-config\") pod \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\" (UID: \"5b3d03b7-e9b1-4577-a181-f103c4121ecf\") " Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.302976 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b3d03b7-e9b1-4577-a181-f103c4121ecf-kube-api-access-797v5" (OuterVolumeSpecName: "kube-api-access-797v5") pod "5b3d03b7-e9b1-4577-a181-f103c4121ecf" (UID: "5b3d03b7-e9b1-4577-a181-f103c4121ecf"). InnerVolumeSpecName "kube-api-access-797v5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.315403 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b3d03b7-e9b1-4577-a181-f103c4121ecf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b3d03b7-e9b1-4577-a181-f103c4121ecf" (UID: "5b3d03b7-e9b1-4577-a181-f103c4121ecf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.330876 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b3d03b7-e9b1-4577-a181-f103c4121ecf-config" (OuterVolumeSpecName: "config") pod "5b3d03b7-e9b1-4577-a181-f103c4121ecf" (UID: "5b3d03b7-e9b1-4577-a181-f103c4121ecf"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.384239 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-797v5\" (UniqueName: \"kubernetes.io/projected/5b3d03b7-e9b1-4577-a181-f103c4121ecf-kube-api-access-797v5\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.384306 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b3d03b7-e9b1-4577-a181-f103c4121ecf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.384337 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/5b3d03b7-e9b1-4577-a181-f103c4121ecf-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.694972 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-qm7fx" event={"ID":"5b3d03b7-e9b1-4577-a181-f103c4121ecf","Type":"ContainerDied","Data":"e78c7face5517eae83cfc2ec85384b10c1565f23b570b0f5e60eb73723461f31"} Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.695038 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-qm7fx" Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.695048 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e78c7face5517eae83cfc2ec85384b10c1565f23b570b0f5e60eb73723461f31" Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.721431 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:21:49 crc kubenswrapper[4910]: E0105 23:21:49.723415 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.863072 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c4486bb9f-r5zvb"] Jan 05 23:21:49 crc kubenswrapper[4910]: E0105 23:21:49.863674 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b3d03b7-e9b1-4577-a181-f103c4121ecf" containerName="neutron-db-sync" Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.863746 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b3d03b7-e9b1-4577-a181-f103c4121ecf" containerName="neutron-db-sync" Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.863957 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b3d03b7-e9b1-4577-a181-f103c4121ecf" containerName="neutron-db-sync" Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.864850 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.874947 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c4486bb9f-r5zvb"]
Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.929636 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6bc969c685-87gv2"]
Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.931390 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.935413 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.935481 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-hprgw"
Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.937086 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.950016 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6bc969c685-87gv2"]
Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.998044 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2mwt\" (UniqueName: \"kubernetes.io/projected/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-kube-api-access-m2mwt\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.998102 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-dns-svc\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.998149 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-ovsdbserver-nb\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.998219 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-ovsdbserver-sb\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:49 crc kubenswrapper[4910]: I0105 23:21:49.998246 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-config\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.100108 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d6bd3395-e6e2-441d-8181-122d421e5947-httpd-config\") pod \"neutron-6bc969c685-87gv2\" (UID: \"d6bd3395-e6e2-441d-8181-122d421e5947\") " pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.100424 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2mwt\" (UniqueName: \"kubernetes.io/projected/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-kube-api-access-m2mwt\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.100485 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d6bd3395-e6e2-441d-8181-122d421e5947-config\") pod \"neutron-6bc969c685-87gv2\" (UID: \"d6bd3395-e6e2-441d-8181-122d421e5947\") " pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.100522 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-dns-svc\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.100590 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-ovsdbserver-nb\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.100788 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9kjj\" (UniqueName: \"kubernetes.io/projected/d6bd3395-e6e2-441d-8181-122d421e5947-kube-api-access-g9kjj\") pod \"neutron-6bc969c685-87gv2\" (UID: \"d6bd3395-e6e2-441d-8181-122d421e5947\") " pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.100974 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-ovsdbserver-sb\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.101012 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-config\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.101079 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6bd3395-e6e2-441d-8181-122d421e5947-combined-ca-bundle\") pod \"neutron-6bc969c685-87gv2\" (UID: \"d6bd3395-e6e2-441d-8181-122d421e5947\") " pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.102454 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-dns-svc\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.102818 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-ovsdbserver-nb\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.102832 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-config\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.102955 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-ovsdbserver-sb\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.122069 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2mwt\" (UniqueName: \"kubernetes.io/projected/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-kube-api-access-m2mwt\") pod \"dnsmasq-dns-7c4486bb9f-r5zvb\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.194163 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.202355 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d6bd3395-e6e2-441d-8181-122d421e5947-httpd-config\") pod \"neutron-6bc969c685-87gv2\" (UID: \"d6bd3395-e6e2-441d-8181-122d421e5947\") " pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.202447 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d6bd3395-e6e2-441d-8181-122d421e5947-config\") pod \"neutron-6bc969c685-87gv2\" (UID: \"d6bd3395-e6e2-441d-8181-122d421e5947\") " pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.203188 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9kjj\" (UniqueName: \"kubernetes.io/projected/d6bd3395-e6e2-441d-8181-122d421e5947-kube-api-access-g9kjj\") pod \"neutron-6bc969c685-87gv2\" (UID: \"d6bd3395-e6e2-441d-8181-122d421e5947\") " pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.203248 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6bd3395-e6e2-441d-8181-122d421e5947-combined-ca-bundle\") pod \"neutron-6bc969c685-87gv2\" (UID: \"d6bd3395-e6e2-441d-8181-122d421e5947\") " pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.211558 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6bd3395-e6e2-441d-8181-122d421e5947-combined-ca-bundle\") pod \"neutron-6bc969c685-87gv2\" (UID: \"d6bd3395-e6e2-441d-8181-122d421e5947\") " pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.211988 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d6bd3395-e6e2-441d-8181-122d421e5947-httpd-config\") pod \"neutron-6bc969c685-87gv2\" (UID: \"d6bd3395-e6e2-441d-8181-122d421e5947\") " pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.212331 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/d6bd3395-e6e2-441d-8181-122d421e5947-config\") pod \"neutron-6bc969c685-87gv2\" (UID: \"d6bd3395-e6e2-441d-8181-122d421e5947\") " pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.229866 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9kjj\" (UniqueName: \"kubernetes.io/projected/d6bd3395-e6e2-441d-8181-122d421e5947-kube-api-access-g9kjj\") pod \"neutron-6bc969c685-87gv2\" (UID: \"d6bd3395-e6e2-441d-8181-122d421e5947\") " pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.251316 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.694135 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c4486bb9f-r5zvb"]
Jan 05 23:21:50 crc kubenswrapper[4910]: I0105 23:21:50.932055 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6bc969c685-87gv2"]
Jan 05 23:21:50 crc kubenswrapper[4910]: W0105 23:21:50.932068 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6bd3395_e6e2_441d_8181_122d421e5947.slice/crio-166ea53c6bcbf7f3161376be38d875914b8750437b0963528a776e1a1b8acdeb WatchSource:0}: Error finding container 166ea53c6bcbf7f3161376be38d875914b8750437b0963528a776e1a1b8acdeb: Status 404 returned error can't find the container with id 166ea53c6bcbf7f3161376be38d875914b8750437b0963528a776e1a1b8acdeb
Jan 05 23:21:51 crc kubenswrapper[4910]: I0105 23:21:51.720262 4910 generic.go:334] "Generic (PLEG): container finished" podID="09af03d5-cfaf-400d-bfb2-bb08f1b57d45" containerID="5f4fbc7211bda5403a5df8ad2b4263286f1d8b924076792a7f6a3b7759e1ea9b" exitCode=0
Jan 05 23:21:51 crc kubenswrapper[4910]: I0105 23:21:51.721221 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb" event={"ID":"09af03d5-cfaf-400d-bfb2-bb08f1b57d45","Type":"ContainerDied","Data":"5f4fbc7211bda5403a5df8ad2b4263286f1d8b924076792a7f6a3b7759e1ea9b"}
Jan 05 23:21:51 crc kubenswrapper[4910]: I0105 23:21:51.721296 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb" event={"ID":"09af03d5-cfaf-400d-bfb2-bb08f1b57d45","Type":"ContainerStarted","Data":"7e6e56372f6f668cd31373c5f401d689ed93db54e2163ae2a0dddff1748aa17d"}
Jan 05 23:21:51 crc kubenswrapper[4910]: I0105 23:21:51.725323 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6bc969c685-87gv2" event={"ID":"d6bd3395-e6e2-441d-8181-122d421e5947","Type":"ContainerStarted","Data":"2471fd42fa889f039bf74583646b11b937d47aefeb12a0c82a6f7aa2d17b73b3"}
Jan 05 23:21:51 crc kubenswrapper[4910]: I0105 23:21:51.725387 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6bc969c685-87gv2" event={"ID":"d6bd3395-e6e2-441d-8181-122d421e5947","Type":"ContainerStarted","Data":"2273e7e6c97b973ee8ed99fa98e0c99d35d2183fbcab878e8d082b939ec5f809"}
Jan 05 23:21:51 crc kubenswrapper[4910]: I0105 23:21:51.725402 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6bc969c685-87gv2" event={"ID":"d6bd3395-e6e2-441d-8181-122d421e5947","Type":"ContainerStarted","Data":"166ea53c6bcbf7f3161376be38d875914b8750437b0963528a776e1a1b8acdeb"}
Jan 05 23:21:51 crc kubenswrapper[4910]: I0105 23:21:51.725539 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:21:51 crc kubenswrapper[4910]: I0105 23:21:51.779404 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6bc969c685-87gv2" podStartSLOduration=2.779357674 podStartE2EDuration="2.779357674s" podCreationTimestamp="2026-01-05 23:21:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:21:51.767442709 +0000 UTC m=+5443.344940379" watchObservedRunningTime="2026-01-05 23:21:51.779357674 +0000 UTC m=+5443.356855434"
Jan 05 23:21:52 crc kubenswrapper[4910]: I0105 23:21:52.740624 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb" event={"ID":"09af03d5-cfaf-400d-bfb2-bb08f1b57d45","Type":"ContainerStarted","Data":"b2961f100b2611bbd0afb9773d4dd510a3e79c06e8b1197fe9dcd5009238aa05"}
Jan 05 23:21:52 crc kubenswrapper[4910]: I0105 23:21:52.768063 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb" podStartSLOduration=3.7680400819999997 podStartE2EDuration="3.768040082s" podCreationTimestamp="2026-01-05 23:21:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:21:52.761928791 +0000 UTC m=+5444.339426501" watchObservedRunningTime="2026-01-05 23:21:52.768040082 +0000 UTC m=+5444.345537752"
Jan 05 23:21:53 crc kubenswrapper[4910]: I0105 23:21:53.754733 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.196541 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb"
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.352430 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d84fc7bc9-gzscs"]
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.352925 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" podUID="3ae156b9-e4ff-4442-af05-16a5c951c5e0" containerName="dnsmasq-dns" containerID="cri-o://47caaf867ba63c0742a6dddc5f2ba2efd25c18010801c132c9942b65686f4e9e" gracePeriod=10
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.826538 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs"
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.880738 4910 generic.go:334] "Generic (PLEG): container finished" podID="3ae156b9-e4ff-4442-af05-16a5c951c5e0" containerID="47caaf867ba63c0742a6dddc5f2ba2efd25c18010801c132c9942b65686f4e9e" exitCode=0
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.880781 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" event={"ID":"3ae156b9-e4ff-4442-af05-16a5c951c5e0","Type":"ContainerDied","Data":"47caaf867ba63c0742a6dddc5f2ba2efd25c18010801c132c9942b65686f4e9e"}
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.880809 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs" event={"ID":"3ae156b9-e4ff-4442-af05-16a5c951c5e0","Type":"ContainerDied","Data":"67cc186de59f52e4fb9931620196122ff9bf53dd1596ffaa6fb66a5a4d769808"}
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.880826 4910 scope.go:117] "RemoveContainer" containerID="47caaf867ba63c0742a6dddc5f2ba2efd25c18010801c132c9942b65686f4e9e"
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.880885 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d84fc7bc9-gzscs"
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.935458 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-ovsdbserver-sb\") pod \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") "
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.935600 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zq7gw\" (UniqueName: \"kubernetes.io/projected/3ae156b9-e4ff-4442-af05-16a5c951c5e0-kube-api-access-zq7gw\") pod \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") "
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.936473 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-dns-svc\") pod \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") "
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.936500 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-ovsdbserver-nb\") pod \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") "
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.936551 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-config\") pod \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\" (UID: \"3ae156b9-e4ff-4442-af05-16a5c951c5e0\") "
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.958622 4910 scope.go:117] "RemoveContainer" containerID="f96eb75999b54670ffe1aed60c8a515017be8033a6cd70804269b2d08d2af581"
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.971289 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ae156b9-e4ff-4442-af05-16a5c951c5e0-kube-api-access-zq7gw" (OuterVolumeSpecName: "kube-api-access-zq7gw") pod "3ae156b9-e4ff-4442-af05-16a5c951c5e0" (UID: "3ae156b9-e4ff-4442-af05-16a5c951c5e0"). InnerVolumeSpecName "kube-api-access-zq7gw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 23:22:00 crc kubenswrapper[4910]: I0105 23:22:00.997271 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3ae156b9-e4ff-4442-af05-16a5c951c5e0" (UID: "3ae156b9-e4ff-4442-af05-16a5c951c5e0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:00.999987 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-config" (OuterVolumeSpecName: "config") pod "3ae156b9-e4ff-4442-af05-16a5c951c5e0" (UID: "3ae156b9-e4ff-4442-af05-16a5c951c5e0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:01.022638 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3ae156b9-e4ff-4442-af05-16a5c951c5e0" (UID: "3ae156b9-e4ff-4442-af05-16a5c951c5e0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:01.038213 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:01.038240 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:01.038249 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-config\") on node \"crc\" DevicePath \"\""
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:01.038259 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zq7gw\" (UniqueName: \"kubernetes.io/projected/3ae156b9-e4ff-4442-af05-16a5c951c5e0-kube-api-access-zq7gw\") on node \"crc\" DevicePath \"\""
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:01.040812 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3ae156b9-e4ff-4442-af05-16a5c951c5e0" (UID: "3ae156b9-e4ff-4442-af05-16a5c951c5e0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:01.044879 4910 scope.go:117] "RemoveContainer" containerID="47caaf867ba63c0742a6dddc5f2ba2efd25c18010801c132c9942b65686f4e9e"
Jan 05 23:22:01 crc kubenswrapper[4910]: E0105 23:22:01.045408 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47caaf867ba63c0742a6dddc5f2ba2efd25c18010801c132c9942b65686f4e9e\": container with ID starting with 47caaf867ba63c0742a6dddc5f2ba2efd25c18010801c132c9942b65686f4e9e not found: ID does not exist" containerID="47caaf867ba63c0742a6dddc5f2ba2efd25c18010801c132c9942b65686f4e9e"
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:01.045454 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47caaf867ba63c0742a6dddc5f2ba2efd25c18010801c132c9942b65686f4e9e"} err="failed to get container status \"47caaf867ba63c0742a6dddc5f2ba2efd25c18010801c132c9942b65686f4e9e\": rpc error: code = NotFound desc = could not find container \"47caaf867ba63c0742a6dddc5f2ba2efd25c18010801c132c9942b65686f4e9e\": container with ID starting with 47caaf867ba63c0742a6dddc5f2ba2efd25c18010801c132c9942b65686f4e9e not found: ID does not exist"
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:01.045484 4910 scope.go:117] "RemoveContainer" containerID="f96eb75999b54670ffe1aed60c8a515017be8033a6cd70804269b2d08d2af581"
Jan 05 23:22:01 crc kubenswrapper[4910]: E0105 23:22:01.045799 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f96eb75999b54670ffe1aed60c8a515017be8033a6cd70804269b2d08d2af581\": container with ID starting with f96eb75999b54670ffe1aed60c8a515017be8033a6cd70804269b2d08d2af581 not found: ID does not exist" containerID="f96eb75999b54670ffe1aed60c8a515017be8033a6cd70804269b2d08d2af581"
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:01.045829 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f96eb75999b54670ffe1aed60c8a515017be8033a6cd70804269b2d08d2af581"} err="failed to get container status \"f96eb75999b54670ffe1aed60c8a515017be8033a6cd70804269b2d08d2af581\": rpc error: code = NotFound desc = could not find container \"f96eb75999b54670ffe1aed60c8a515017be8033a6cd70804269b2d08d2af581\": container with ID starting with f96eb75999b54670ffe1aed60c8a515017be8033a6cd70804269b2d08d2af581 not found: ID does not exist"
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:01.139880 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ae156b9-e4ff-4442-af05-16a5c951c5e0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:01.222174 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d84fc7bc9-gzscs"]
Jan 05 23:22:01 crc kubenswrapper[4910]: I0105 23:22:01.228258 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d84fc7bc9-gzscs"]
Jan 05 23:22:02 crc kubenswrapper[4910]: I0105 23:22:02.722839 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e"
Jan 05 23:22:02 crc kubenswrapper[4910]: E0105 23:22:02.723995 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 23:22:02 crc kubenswrapper[4910]: I0105 23:22:02.737667 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ae156b9-e4ff-4442-af05-16a5c951c5e0" path="/var/lib/kubelet/pods/3ae156b9-e4ff-4442-af05-16a5c951c5e0/volumes"
Jan 05 23:22:16 crc kubenswrapper[4910]: I0105 23:22:16.722004 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e"
Jan 05 23:22:16 crc kubenswrapper[4910]: E0105 23:22:16.723176 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 23:22:20 crc kubenswrapper[4910]: I0105 23:22:20.264903 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6bc969c685-87gv2"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.605671 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-gzbt7"]
Jan 05 23:22:28 crc kubenswrapper[4910]: E0105 23:22:28.606727 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ae156b9-e4ff-4442-af05-16a5c951c5e0" containerName="dnsmasq-dns"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.606750 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ae156b9-e4ff-4442-af05-16a5c951c5e0" containerName="dnsmasq-dns"
Jan 05 23:22:28 crc kubenswrapper[4910]: E0105 23:22:28.606772 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ae156b9-e4ff-4442-af05-16a5c951c5e0" containerName="init"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.606783 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ae156b9-e4ff-4442-af05-16a5c951c5e0" containerName="init"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.607060 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ae156b9-e4ff-4442-af05-16a5c951c5e0" containerName="dnsmasq-dns"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.607891 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-gzbt7"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.621865 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-gzbt7"]
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.697017 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-3695-account-create-update-dlx7s"]
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.698502 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-3695-account-create-update-dlx7s"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.700587 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.712279 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-3695-account-create-update-dlx7s"]
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.739874 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svwbs\" (UniqueName: \"kubernetes.io/projected/cc30a657-c8fd-41db-a82d-fd41a721b4d7-kube-api-access-svwbs\") pod \"glance-db-create-gzbt7\" (UID: \"cc30a657-c8fd-41db-a82d-fd41a721b4d7\") " pod="openstack/glance-db-create-gzbt7"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.739964 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc30a657-c8fd-41db-a82d-fd41a721b4d7-operator-scripts\") pod \"glance-db-create-gzbt7\" (UID: \"cc30a657-c8fd-41db-a82d-fd41a721b4d7\") " pod="openstack/glance-db-create-gzbt7"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.848912 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svwbs\" (UniqueName: \"kubernetes.io/projected/cc30a657-c8fd-41db-a82d-fd41a721b4d7-kube-api-access-svwbs\") pod \"glance-db-create-gzbt7\" (UID: \"cc30a657-c8fd-41db-a82d-fd41a721b4d7\") " pod="openstack/glance-db-create-gzbt7"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.849216 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc30a657-c8fd-41db-a82d-fd41a721b4d7-operator-scripts\") pod \"glance-db-create-gzbt7\" (UID: \"cc30a657-c8fd-41db-a82d-fd41a721b4d7\") " pod="openstack/glance-db-create-gzbt7"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.849538 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1-operator-scripts\") pod \"glance-3695-account-create-update-dlx7s\" (UID: \"1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1\") " pod="openstack/glance-3695-account-create-update-dlx7s"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.849675 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbjv8\" (UniqueName: \"kubernetes.io/projected/1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1-kube-api-access-tbjv8\") pod \"glance-3695-account-create-update-dlx7s\" (UID: \"1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1\") " pod="openstack/glance-3695-account-create-update-dlx7s"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.852308 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc30a657-c8fd-41db-a82d-fd41a721b4d7-operator-scripts\") pod \"glance-db-create-gzbt7\" (UID: \"cc30a657-c8fd-41db-a82d-fd41a721b4d7\") " pod="openstack/glance-db-create-gzbt7"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.878283 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svwbs\" (UniqueName: \"kubernetes.io/projected/cc30a657-c8fd-41db-a82d-fd41a721b4d7-kube-api-access-svwbs\") pod \"glance-db-create-gzbt7\" (UID: \"cc30a657-c8fd-41db-a82d-fd41a721b4d7\") " pod="openstack/glance-db-create-gzbt7"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.931877 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-gzbt7"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.951071 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1-operator-scripts\") pod \"glance-3695-account-create-update-dlx7s\" (UID: \"1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1\") " pod="openstack/glance-3695-account-create-update-dlx7s"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.951167 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbjv8\" (UniqueName: \"kubernetes.io/projected/1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1-kube-api-access-tbjv8\") pod \"glance-3695-account-create-update-dlx7s\" (UID: \"1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1\") " pod="openstack/glance-3695-account-create-update-dlx7s"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.952887 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1-operator-scripts\") pod \"glance-3695-account-create-update-dlx7s\" (UID: \"1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1\") " pod="openstack/glance-3695-account-create-update-dlx7s"
Jan 05 23:22:28 crc kubenswrapper[4910]: I0105 23:22:28.971275 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbjv8\" (UniqueName: \"kubernetes.io/projected/1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1-kube-api-access-tbjv8\") pod \"glance-3695-account-create-update-dlx7s\" (UID: \"1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1\") " pod="openstack/glance-3695-account-create-update-dlx7s"
Jan 05 23:22:29 crc kubenswrapper[4910]: I0105 23:22:29.016902 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-3695-account-create-update-dlx7s"
Jan 05 23:22:29 crc kubenswrapper[4910]: I0105 23:22:29.464827 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-gzbt7"]
Jan 05 23:22:29 crc kubenswrapper[4910]: I0105 23:22:29.686451 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-3695-account-create-update-dlx7s"]
Jan 05 23:22:30 crc kubenswrapper[4910]: I0105 23:22:30.237635 4910 generic.go:334] "Generic (PLEG): container finished" podID="1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1" containerID="a7eb2dea7c54212684db87b3c5d5a14b0740f3d86b797d329dbafe88ddb27fb2" exitCode=0
Jan 05 23:22:30 crc kubenswrapper[4910]: I0105 23:22:30.237718 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-3695-account-create-update-dlx7s" event={"ID":"1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1","Type":"ContainerDied","Data":"a7eb2dea7c54212684db87b3c5d5a14b0740f3d86b797d329dbafe88ddb27fb2"}
Jan 05 23:22:30 crc kubenswrapper[4910]: I0105 23:22:30.237792 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-3695-account-create-update-dlx7s" event={"ID":"1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1","Type":"ContainerStarted","Data":"a2dcf3394f53ed2233fe2be5313cd62a33dd2e94cd1d1ea87e98f5b38093b4c1"}
Jan 05 23:22:30 crc kubenswrapper[4910]: I0105 23:22:30.240213 4910 generic.go:334] "Generic (PLEG): container finished" podID="cc30a657-c8fd-41db-a82d-fd41a721b4d7" containerID="01c68606b8dab6983f3942011551c6db4a6476ce71faad0d3363dfb91f7b354a" exitCode=0
Jan 05 23:22:30 crc kubenswrapper[4910]: I0105 23:22:30.240276 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-gzbt7" event={"ID":"cc30a657-c8fd-41db-a82d-fd41a721b4d7","Type":"ContainerDied","Data":"01c68606b8dab6983f3942011551c6db4a6476ce71faad0d3363dfb91f7b354a"}
Jan 05 23:22:30 crc kubenswrapper[4910]: I0105 23:22:30.240476 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-gzbt7" event={"ID":"cc30a657-c8fd-41db-a82d-fd41a721b4d7","Type":"ContainerStarted","Data":"a3dcdbb3369c1441a41d70ca19b07b994bbc64209b52ddc7bfb1f2584a56c166"}
Jan 05 23:22:30 crc kubenswrapper[4910]: I0105 23:22:30.722568 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e"
Jan 05 23:22:30 crc kubenswrapper[4910]: E0105 23:22:30.723102 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 23:22:31 crc kubenswrapper[4910]: I0105 23:22:31.739693 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-3695-account-create-update-dlx7s"
Jan 05 23:22:31 crc kubenswrapper[4910]: I0105 23:22:31.748355 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-gzbt7"
Jan 05 23:22:31 crc kubenswrapper[4910]: I0105 23:22:31.917858 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbjv8\" (UniqueName: \"kubernetes.io/projected/1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1-kube-api-access-tbjv8\") pod \"1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1\" (UID: \"1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1\") "
Jan 05 23:22:31 crc kubenswrapper[4910]: I0105 23:22:31.918018 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1-operator-scripts\") pod \"1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1\" (UID: \"1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1\") "
Jan 05 23:22:31 crc kubenswrapper[4910]: I0105 23:22:31.918236 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc30a657-c8fd-41db-a82d-fd41a721b4d7-operator-scripts\") pod \"cc30a657-c8fd-41db-a82d-fd41a721b4d7\" (UID: \"cc30a657-c8fd-41db-a82d-fd41a721b4d7\") "
Jan 05 23:22:31 crc kubenswrapper[4910]: I0105 23:22:31.918292 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svwbs\" (UniqueName: \"kubernetes.io/projected/cc30a657-c8fd-41db-a82d-fd41a721b4d7-kube-api-access-svwbs\") pod \"cc30a657-c8fd-41db-a82d-fd41a721b4d7\" (UID: \"cc30a657-c8fd-41db-a82d-fd41a721b4d7\") "
Jan 05 23:22:31 crc kubenswrapper[4910]: I0105 23:22:31.919309 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc30a657-c8fd-41db-a82d-fd41a721b4d7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cc30a657-c8fd-41db-a82d-fd41a721b4d7" (UID: "cc30a657-c8fd-41db-a82d-fd41a721b4d7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 23:22:31 crc kubenswrapper[4910]: I0105 23:22:31.919376 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1" (UID: "1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 05 23:22:31 crc kubenswrapper[4910]: I0105 23:22:31.929479 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1-kube-api-access-tbjv8" (OuterVolumeSpecName: "kube-api-access-tbjv8") pod "1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1" (UID: "1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1"). InnerVolumeSpecName "kube-api-access-tbjv8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 23:22:31 crc kubenswrapper[4910]: I0105 23:22:31.929634 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc30a657-c8fd-41db-a82d-fd41a721b4d7-kube-api-access-svwbs" (OuterVolumeSpecName: "kube-api-access-svwbs") pod "cc30a657-c8fd-41db-a82d-fd41a721b4d7" (UID: "cc30a657-c8fd-41db-a82d-fd41a721b4d7"). InnerVolumeSpecName "kube-api-access-svwbs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 23:22:32 crc kubenswrapper[4910]: I0105 23:22:32.021198 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc30a657-c8fd-41db-a82d-fd41a721b4d7-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 05 23:22:32 crc kubenswrapper[4910]: I0105 23:22:32.021265 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svwbs\" (UniqueName: \"kubernetes.io/projected/cc30a657-c8fd-41db-a82d-fd41a721b4d7-kube-api-access-svwbs\") on node \"crc\" DevicePath \"\""
Jan 05 23:22:32 crc kubenswrapper[4910]: I0105 23:22:32.021288 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbjv8\" (UniqueName: \"kubernetes.io/projected/1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1-kube-api-access-tbjv8\") on node \"crc\" DevicePath \"\""
Jan 05 23:22:32 crc kubenswrapper[4910]: I0105 23:22:32.021305 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 05 23:22:32 crc kubenswrapper[4910]: I0105 23:22:32.267864 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-3695-account-create-update-dlx7s" event={"ID":"1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1","Type":"ContainerDied","Data":"a2dcf3394f53ed2233fe2be5313cd62a33dd2e94cd1d1ea87e98f5b38093b4c1"}
Jan 05 23:22:32 crc kubenswrapper[4910]: I0105 23:22:32.268382 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2dcf3394f53ed2233fe2be5313cd62a33dd2e94cd1d1ea87e98f5b38093b4c1"
Jan 05 23:22:32 crc kubenswrapper[4910]: I0105 23:22:32.267950 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-3695-account-create-update-dlx7s"
Jan 05 23:22:32 crc kubenswrapper[4910]: I0105 23:22:32.271201 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-gzbt7" event={"ID":"cc30a657-c8fd-41db-a82d-fd41a721b4d7","Type":"ContainerDied","Data":"a3dcdbb3369c1441a41d70ca19b07b994bbc64209b52ddc7bfb1f2584a56c166"}
Jan 05 23:22:32 crc kubenswrapper[4910]: I0105 23:22:32.271265 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a3dcdbb3369c1441a41d70ca19b07b994bbc64209b52ddc7bfb1f2584a56c166"
Jan 05 23:22:32 crc kubenswrapper[4910]: I0105 23:22:32.271397 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-gzbt7"
Jan 05 23:22:33 crc kubenswrapper[4910]: I0105 23:22:33.936874 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-skmcr"]
Jan 05 23:22:33 crc kubenswrapper[4910]: E0105 23:22:33.937207 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc30a657-c8fd-41db-a82d-fd41a721b4d7" containerName="mariadb-database-create"
Jan 05 23:22:33 crc kubenswrapper[4910]: I0105 23:22:33.937220 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc30a657-c8fd-41db-a82d-fd41a721b4d7" containerName="mariadb-database-create"
Jan 05 23:22:33 crc kubenswrapper[4910]: E0105 23:22:33.937229 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1" containerName="mariadb-account-create-update"
Jan 05 23:22:33 crc kubenswrapper[4910]: I0105 23:22:33.937235 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1" containerName="mariadb-account-create-update"
Jan 05 23:22:33 crc kubenswrapper[4910]: I0105 23:22:33.937399 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1" containerName="mariadb-account-create-update"
Jan 05 23:22:33 crc kubenswrapper[4910]: I0105 23:22:33.937413 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc30a657-c8fd-41db-a82d-fd41a721b4d7" containerName="mariadb-database-create"
Jan 05 23:22:33 crc kubenswrapper[4910]: I0105 23:22:33.937945 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:33 crc kubenswrapper[4910]: I0105 23:22:33.941753 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-xt8b5"
Jan 05 23:22:33 crc kubenswrapper[4910]: I0105 23:22:33.941763 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:33.961629 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-skmcr"]
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.006258 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-db-sync-config-data\") pod \"glance-db-sync-skmcr\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") " pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.006326 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cbjv\" (UniqueName: \"kubernetes.io/projected/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-kube-api-access-6cbjv\") pod \"glance-db-sync-skmcr\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") " pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.006525 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-combined-ca-bundle\") pod \"glance-db-sync-skmcr\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") " pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.006554 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-config-data\") pod \"glance-db-sync-skmcr\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") " pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.108893 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-combined-ca-bundle\") pod \"glance-db-sync-skmcr\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") " pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.109395 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-config-data\") pod \"glance-db-sync-skmcr\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") " pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.109454 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-db-sync-config-data\") pod \"glance-db-sync-skmcr\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") " pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.109501 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cbjv\" (UniqueName: \"kubernetes.io/projected/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-kube-api-access-6cbjv\") pod \"glance-db-sync-skmcr\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") " pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.117083 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-config-data\") pod \"glance-db-sync-skmcr\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") " pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.126838 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-db-sync-config-data\") pod \"glance-db-sync-skmcr\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") " pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.127105 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-combined-ca-bundle\") pod \"glance-db-sync-skmcr\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") " pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.134273 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cbjv\" (UniqueName: \"kubernetes.io/projected/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-kube-api-access-6cbjv\") pod \"glance-db-sync-skmcr\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") " pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.327366 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:34 crc kubenswrapper[4910]: I0105 23:22:34.943835 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-skmcr"]
Jan 05 23:22:35 crc kubenswrapper[4910]: I0105 23:22:35.303563 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-skmcr" event={"ID":"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278","Type":"ContainerStarted","Data":"ddce75ef68a69f48028c40481daefe472941f1909cbea5bae4c7d3585920f177"}
Jan 05 23:22:36 crc kubenswrapper[4910]: I0105 23:22:36.325470 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-skmcr" event={"ID":"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278","Type":"ContainerStarted","Data":"104c3a4d156d738369debee93e3a01cea118fe5a08e323bccbacb1b2e82e3840"}
Jan 05 23:22:36 crc kubenswrapper[4910]: I0105 23:22:36.352391 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-skmcr" podStartSLOduration=3.352361651 podStartE2EDuration="3.352361651s" podCreationTimestamp="2026-01-05 23:22:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:22:36.349856189 +0000 UTC m=+5487.927353909" watchObservedRunningTime="2026-01-05 23:22:36.352361651 +0000 UTC m=+5487.929859341"
Jan 05 23:22:39 crc kubenswrapper[4910]: I0105 23:22:39.363544 4910 generic.go:334] "Generic (PLEG): container finished" podID="fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278" containerID="104c3a4d156d738369debee93e3a01cea118fe5a08e323bccbacb1b2e82e3840" exitCode=0
Jan 05 23:22:39 crc kubenswrapper[4910]: I0105 23:22:39.363739 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-skmcr" event={"ID":"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278","Type":"ContainerDied","Data":"104c3a4d156d738369debee93e3a01cea118fe5a08e323bccbacb1b2e82e3840"}
Jan 05 23:22:40 crc kubenswrapper[4910]: I0105 23:22:40.784531 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:40 crc kubenswrapper[4910]: I0105 23:22:40.946545 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-db-sync-config-data\") pod \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") "
Jan 05 23:22:40 crc kubenswrapper[4910]: I0105 23:22:40.946611 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-config-data\") pod \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") "
Jan 05 23:22:40 crc kubenswrapper[4910]: I0105 23:22:40.946672 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cbjv\" (UniqueName: \"kubernetes.io/projected/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-kube-api-access-6cbjv\") pod \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") "
Jan 05 23:22:40 crc kubenswrapper[4910]: I0105 23:22:40.946973 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-combined-ca-bundle\") pod \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\" (UID: \"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278\") "
Jan 05 23:22:40 crc kubenswrapper[4910]: I0105 23:22:40.955585 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278" (UID: "fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 23:22:40 crc kubenswrapper[4910]: I0105 23:22:40.963301 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-kube-api-access-6cbjv" (OuterVolumeSpecName: "kube-api-access-6cbjv") pod "fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278" (UID: "fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278"). InnerVolumeSpecName "kube-api-access-6cbjv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 23:22:40 crc kubenswrapper[4910]: I0105 23:22:40.988076 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278" (UID: "fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.018823 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-config-data" (OuterVolumeSpecName: "config-data") pod "fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278" (UID: "fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.049691 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cbjv\" (UniqueName: \"kubernetes.io/projected/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-kube-api-access-6cbjv\") on node \"crc\" DevicePath \"\""
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.050078 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.050210 4910 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.050232 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.389694 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-skmcr" event={"ID":"fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278","Type":"ContainerDied","Data":"ddce75ef68a69f48028c40481daefe472941f1909cbea5bae4c7d3585920f177"}
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.389758 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ddce75ef68a69f48028c40481daefe472941f1909cbea5bae4c7d3585920f177"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.389853 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-skmcr"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.774782 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 05 23:22:41 crc kubenswrapper[4910]: E0105 23:22:41.775406 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278" containerName="glance-db-sync"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.775422 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278" containerName="glance-db-sync"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.775589 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278" containerName="glance-db-sync"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.777884 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.780721 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-xt8b5"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.781544 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.781541 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.781592 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.788468 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.904774 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bf548fdbf-ns22l"]
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.931584 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bf548fdbf-ns22l"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.960244 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bf548fdbf-ns22l"]
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.966862 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aa11d5de-6574-433a-a046-0853d973066b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.966925 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-config-data\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.967063 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bqvw\" (UniqueName: \"kubernetes.io/projected/aa11d5de-6574-433a-a046-0853d973066b-kube-api-access-4bqvw\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.967173 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa11d5de-6574-433a-a046-0853d973066b-logs\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.967613 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-scripts\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.967797 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/aa11d5de-6574-433a-a046-0853d973066b-ceph\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:41 crc kubenswrapper[4910]: I0105 23:22:41.967860 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.059372 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.060782 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.064069 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.069526 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa11d5de-6574-433a-a046-0853d973066b-logs\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.069581 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-dns-svc\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.069645 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw6dj\" (UniqueName: \"kubernetes.io/projected/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-kube-api-access-bw6dj\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.069774 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-config\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.069838 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-scripts\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.069929 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-ovsdbserver-nb\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.069942 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa11d5de-6574-433a-a046-0853d973066b-logs\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.069960 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/aa11d5de-6574-433a-a046-0853d973066b-ceph\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.069988 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-ovsdbserver-sb\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.070011 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.070090 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aa11d5de-6574-433a-a046-0853d973066b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.070264 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-config-data\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.070323 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bqvw\" (UniqueName: \"kubernetes.io/projected/aa11d5de-6574-433a-a046-0853d973066b-kube-api-access-4bqvw\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.070499 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aa11d5de-6574-433a-a046-0853d973066b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.095859 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.098246 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-config-data\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.100794 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-scripts\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.104168 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/aa11d5de-6574-433a-a046-0853d973066b-ceph\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.105456 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bqvw\" (UniqueName: \"kubernetes.io/projected/aa11d5de-6574-433a-a046-0853d973066b-kube-api-access-4bqvw\") pod \"glance-default-external-api-0\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " pod="openstack/glance-default-external-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.105982 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.176745 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-ovsdbserver-nb\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.176800 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc49cf3f-1ba4-419c-86f0-506b0001f341-logs\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.176827 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-ovsdbserver-sb\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.176887 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dc49cf3f-1ba4-419c-86f0-506b0001f341-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0"
Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.176918 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-config-data\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " 
pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.176937 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-dns-svc\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.176980 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-scripts\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.177022 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bw6dj\" (UniqueName: \"kubernetes.io/projected/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-kube-api-access-bw6dj\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.177047 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/dc49cf3f-1ba4-419c-86f0-506b0001f341-ceph\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.177064 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-config\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.177085 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knt4z\" (UniqueName: \"kubernetes.io/projected/dc49cf3f-1ba4-419c-86f0-506b0001f341-kube-api-access-knt4z\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.177103 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.178163 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-ovsdbserver-sb\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.178676 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-dns-svc\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " 
pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.178700 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-config\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.180081 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-ovsdbserver-nb\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.195837 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bw6dj\" (UniqueName: \"kubernetes.io/projected/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-kube-api-access-bw6dj\") pod \"dnsmasq-dns-bf548fdbf-ns22l\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.266174 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.278993 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc49cf3f-1ba4-419c-86f0-506b0001f341-logs\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.279361 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dc49cf3f-1ba4-419c-86f0-506b0001f341-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.279463 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-config-data\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.279540 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc49cf3f-1ba4-419c-86f0-506b0001f341-logs\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.279630 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-scripts\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.279725 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/dc49cf3f-1ba4-419c-86f0-506b0001f341-ceph\") pod \"glance-default-internal-api-0\" (UID: 
\"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.279800 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knt4z\" (UniqueName: \"kubernetes.io/projected/dc49cf3f-1ba4-419c-86f0-506b0001f341-kube-api-access-knt4z\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.279869 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.279940 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dc49cf3f-1ba4-419c-86f0-506b0001f341-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.283578 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/dc49cf3f-1ba4-419c-86f0-506b0001f341-ceph\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.285372 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.286599 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-scripts\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.292242 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-config-data\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.303893 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knt4z\" (UniqueName: \"kubernetes.io/projected/dc49cf3f-1ba4-419c-86f0-506b0001f341-kube-api-access-knt4z\") pod \"glance-default-internal-api-0\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.379357 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.393922 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.742775 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bf548fdbf-ns22l"] Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.802050 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 23:22:42 crc kubenswrapper[4910]: W0105 23:22:42.961431 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa11d5de_6574_433a_a046_0853d973066b.slice/crio-7629f49d5f8800a4711471d462803b985e7929970d5416f059cd037675ae4bd7 WatchSource:0}: Error finding container 7629f49d5f8800a4711471d462803b985e7929970d5416f059cd037675ae4bd7: Status 404 returned error can't find the container with id 7629f49d5f8800a4711471d462803b985e7929970d5416f059cd037675ae4bd7 Jan 05 23:22:42 crc kubenswrapper[4910]: I0105 23:22:42.972050 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 23:22:43 crc kubenswrapper[4910]: I0105 23:22:43.079479 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 23:22:43 crc kubenswrapper[4910]: W0105 23:22:43.092788 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc49cf3f_1ba4_419c_86f0_506b0001f341.slice/crio-f90683aeeb53275737cc7a690b5ea1b13439e2688a4d9bdb892adf5cc02c4b85 WatchSource:0}: Error finding container f90683aeeb53275737cc7a690b5ea1b13439e2688a4d9bdb892adf5cc02c4b85: Status 404 returned error can't find the container with id f90683aeeb53275737cc7a690b5ea1b13439e2688a4d9bdb892adf5cc02c4b85 Jan 05 23:22:43 crc kubenswrapper[4910]: I0105 23:22:43.431898 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"aa11d5de-6574-433a-a046-0853d973066b","Type":"ContainerStarted","Data":"7629f49d5f8800a4711471d462803b985e7929970d5416f059cd037675ae4bd7"} Jan 05 23:22:43 crc kubenswrapper[4910]: I0105 23:22:43.434102 4910 generic.go:334] "Generic (PLEG): container finished" podID="f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" containerID="fb44cbb288aa4a3310274b97bfb71f7326bda9a92406e3fa2a83d33247a4c43b" exitCode=0 Jan 05 23:22:43 crc kubenswrapper[4910]: I0105 23:22:43.435041 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" event={"ID":"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9","Type":"ContainerDied","Data":"fb44cbb288aa4a3310274b97bfb71f7326bda9a92406e3fa2a83d33247a4c43b"} Jan 05 23:22:43 crc kubenswrapper[4910]: I0105 23:22:43.435099 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" event={"ID":"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9","Type":"ContainerStarted","Data":"604f8a36a8d62465e8cb4738dc71606d68a363c654d71bc73a6743b974ed093e"} Jan 05 23:22:43 crc kubenswrapper[4910]: I0105 23:22:43.438898 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dc49cf3f-1ba4-419c-86f0-506b0001f341","Type":"ContainerStarted","Data":"f90683aeeb53275737cc7a690b5ea1b13439e2688a4d9bdb892adf5cc02c4b85"} Jan 05 23:22:43 crc kubenswrapper[4910]: I0105 23:22:43.721202 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:22:43 crc kubenswrapper[4910]: E0105 
23:22:43.721956 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:22:44 crc kubenswrapper[4910]: I0105 23:22:44.450541 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dc49cf3f-1ba4-419c-86f0-506b0001f341","Type":"ContainerStarted","Data":"c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116"} Jan 05 23:22:44 crc kubenswrapper[4910]: I0105 23:22:44.451034 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dc49cf3f-1ba4-419c-86f0-506b0001f341","Type":"ContainerStarted","Data":"e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf"} Jan 05 23:22:44 crc kubenswrapper[4910]: I0105 23:22:44.452950 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"aa11d5de-6574-433a-a046-0853d973066b","Type":"ContainerStarted","Data":"bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012"} Jan 05 23:22:44 crc kubenswrapper[4910]: I0105 23:22:44.453044 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"aa11d5de-6574-433a-a046-0853d973066b","Type":"ContainerStarted","Data":"ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0"} Jan 05 23:22:44 crc kubenswrapper[4910]: I0105 23:22:44.453116 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="aa11d5de-6574-433a-a046-0853d973066b" containerName="glance-httpd" containerID="cri-o://bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012" gracePeriod=30 Jan 05 23:22:44 crc kubenswrapper[4910]: I0105 23:22:44.453067 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="aa11d5de-6574-433a-a046-0853d973066b" containerName="glance-log" containerID="cri-o://ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0" gracePeriod=30 Jan 05 23:22:44 crc kubenswrapper[4910]: I0105 23:22:44.456793 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" event={"ID":"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9","Type":"ContainerStarted","Data":"36c8bbc9d52d78a938cdc8e2ae378726984edc58bdf0c119319758644c294da5"} Jan 05 23:22:44 crc kubenswrapper[4910]: I0105 23:22:44.457614 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" Jan 05 23:22:44 crc kubenswrapper[4910]: I0105 23:22:44.476457 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=2.476432086 podStartE2EDuration="2.476432086s" podCreationTimestamp="2026-01-05 23:22:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:22:44.46849559 +0000 UTC m=+5496.045993260" watchObservedRunningTime="2026-01-05 23:22:44.476432086 +0000 UTC m=+5496.053929756" Jan 05 23:22:44 crc kubenswrapper[4910]: I0105 23:22:44.500474 
4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.500452081 podStartE2EDuration="3.500452081s" podCreationTimestamp="2026-01-05 23:22:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:22:44.492430752 +0000 UTC m=+5496.069928422" watchObservedRunningTime="2026-01-05 23:22:44.500452081 +0000 UTC m=+5496.077949751" Jan 05 23:22:44 crc kubenswrapper[4910]: I0105 23:22:44.780624 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" podStartSLOduration=3.78059855 podStartE2EDuration="3.78059855s" podCreationTimestamp="2026-01-05 23:22:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:22:44.529669995 +0000 UTC m=+5496.107167665" watchObservedRunningTime="2026-01-05 23:22:44.78059855 +0000 UTC m=+5496.358096210" Jan 05 23:22:44 crc kubenswrapper[4910]: I0105 23:22:44.783511 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.127162 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.283273 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-scripts\") pod \"aa11d5de-6574-433a-a046-0853d973066b\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.283345 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-combined-ca-bundle\") pod \"aa11d5de-6574-433a-a046-0853d973066b\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.283436 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-config-data\") pod \"aa11d5de-6574-433a-a046-0853d973066b\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.283509 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aa11d5de-6574-433a-a046-0853d973066b-httpd-run\") pod \"aa11d5de-6574-433a-a046-0853d973066b\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.283569 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/aa11d5de-6574-433a-a046-0853d973066b-ceph\") pod \"aa11d5de-6574-433a-a046-0853d973066b\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.283594 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa11d5de-6574-433a-a046-0853d973066b-logs\") pod \"aa11d5de-6574-433a-a046-0853d973066b\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.283697 4910 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bqvw\" (UniqueName: \"kubernetes.io/projected/aa11d5de-6574-433a-a046-0853d973066b-kube-api-access-4bqvw\") pod \"aa11d5de-6574-433a-a046-0853d973066b\" (UID: \"aa11d5de-6574-433a-a046-0853d973066b\") " Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.284347 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa11d5de-6574-433a-a046-0853d973066b-logs" (OuterVolumeSpecName: "logs") pod "aa11d5de-6574-433a-a046-0853d973066b" (UID: "aa11d5de-6574-433a-a046-0853d973066b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.284364 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa11d5de-6574-433a-a046-0853d973066b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "aa11d5de-6574-433a-a046-0853d973066b" (UID: "aa11d5de-6574-433a-a046-0853d973066b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.290165 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa11d5de-6574-433a-a046-0853d973066b-ceph" (OuterVolumeSpecName: "ceph") pod "aa11d5de-6574-433a-a046-0853d973066b" (UID: "aa11d5de-6574-433a-a046-0853d973066b"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.290652 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa11d5de-6574-433a-a046-0853d973066b-kube-api-access-4bqvw" (OuterVolumeSpecName: "kube-api-access-4bqvw") pod "aa11d5de-6574-433a-a046-0853d973066b" (UID: "aa11d5de-6574-433a-a046-0853d973066b"). InnerVolumeSpecName "kube-api-access-4bqvw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.290953 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-scripts" (OuterVolumeSpecName: "scripts") pod "aa11d5de-6574-433a-a046-0853d973066b" (UID: "aa11d5de-6574-433a-a046-0853d973066b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.311115 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aa11d5de-6574-433a-a046-0853d973066b" (UID: "aa11d5de-6574-433a-a046-0853d973066b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.337805 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-config-data" (OuterVolumeSpecName: "config-data") pod "aa11d5de-6574-433a-a046-0853d973066b" (UID: "aa11d5de-6574-433a-a046-0853d973066b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.385542 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bqvw\" (UniqueName: \"kubernetes.io/projected/aa11d5de-6574-433a-a046-0853d973066b-kube-api-access-4bqvw\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.385586 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.385599 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.385611 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa11d5de-6574-433a-a046-0853d973066b-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.385624 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aa11d5de-6574-433a-a046-0853d973066b-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.385638 4910 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/aa11d5de-6574-433a-a046-0853d973066b-ceph\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.385648 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa11d5de-6574-433a-a046-0853d973066b-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.466519 4910 generic.go:334] "Generic (PLEG): container finished" podID="aa11d5de-6574-433a-a046-0853d973066b" containerID="bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012" exitCode=0 Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.466560 4910 generic.go:334] "Generic (PLEG): container finished" podID="aa11d5de-6574-433a-a046-0853d973066b" containerID="ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0" exitCode=143 Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.466579 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"aa11d5de-6574-433a-a046-0853d973066b","Type":"ContainerDied","Data":"bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012"} Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.466652 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.466684 4910 scope.go:117] "RemoveContainer" containerID="bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.466665 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"aa11d5de-6574-433a-a046-0853d973066b","Type":"ContainerDied","Data":"ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0"} Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.466764 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"aa11d5de-6574-433a-a046-0853d973066b","Type":"ContainerDied","Data":"7629f49d5f8800a4711471d462803b985e7929970d5416f059cd037675ae4bd7"} Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.497575 4910 scope.go:117] "RemoveContainer" containerID="ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.514227 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.520818 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.553005 4910 scope.go:117] "RemoveContainer" containerID="bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012" Jan 05 23:22:45 crc kubenswrapper[4910]: E0105 23:22:45.555774 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012\": container with ID starting with bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012 not found: ID does not exist" containerID="bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.555828 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012"} err="failed to get container status \"bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012\": rpc error: code = NotFound desc = could not find container \"bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012\": container with ID starting with bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012 not found: ID does not exist" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.555859 4910 scope.go:117] "RemoveContainer" containerID="ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0" Jan 05 23:22:45 crc kubenswrapper[4910]: E0105 23:22:45.556410 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0\": container with ID starting with ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0 not found: ID does not exist" containerID="ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.556429 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0"} err="failed to get container status 
\"ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0\": rpc error: code = NotFound desc = could not find container \"ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0\": container with ID starting with ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0 not found: ID does not exist" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.556449 4910 scope.go:117] "RemoveContainer" containerID="bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.556718 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012"} err="failed to get container status \"bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012\": rpc error: code = NotFound desc = could not find container \"bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012\": container with ID starting with bf7d568494db5335ab05824c2d442c84b4f341f5700727bfd3cab3eec55a5012 not found: ID does not exist" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.556758 4910 scope.go:117] "RemoveContainer" containerID="ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.557023 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0"} err="failed to get container status \"ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0\": rpc error: code = NotFound desc = could not find container \"ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0\": container with ID starting with ec60b4519c14ce3660b04f5524c6a17a6d17b8be60e1eef61d50237846b3f0a0 not found: ID does not exist" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.563215 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 23:22:45 crc kubenswrapper[4910]: E0105 23:22:45.563793 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa11d5de-6574-433a-a046-0853d973066b" containerName="glance-log" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.563816 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa11d5de-6574-433a-a046-0853d973066b" containerName="glance-log" Jan 05 23:22:45 crc kubenswrapper[4910]: E0105 23:22:45.563845 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa11d5de-6574-433a-a046-0853d973066b" containerName="glance-httpd" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.563856 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa11d5de-6574-433a-a046-0853d973066b" containerName="glance-httpd" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.564102 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa11d5de-6574-433a-a046-0853d973066b" containerName="glance-httpd" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.564150 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa11d5de-6574-433a-a046-0853d973066b" containerName="glance-log" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.566658 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.569161 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.590430 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.691540 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7459c264-36f9-4ebb-a162-81373cd02f98-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.691589 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.691630 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-config-data\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.691699 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-scripts\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.691739 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/7459c264-36f9-4ebb-a162-81373cd02f98-ceph\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.691754 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7459c264-36f9-4ebb-a162-81373cd02f98-logs\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.691787 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfznl\" (UniqueName: \"kubernetes.io/projected/7459c264-36f9-4ebb-a162-81373cd02f98-kube-api-access-pfznl\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.792951 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-scripts\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") 
" pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.793012 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/7459c264-36f9-4ebb-a162-81373cd02f98-ceph\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.793030 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7459c264-36f9-4ebb-a162-81373cd02f98-logs\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.793065 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfznl\" (UniqueName: \"kubernetes.io/projected/7459c264-36f9-4ebb-a162-81373cd02f98-kube-api-access-pfznl\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.793093 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7459c264-36f9-4ebb-a162-81373cd02f98-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.793114 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.793156 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-config-data\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.793882 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7459c264-36f9-4ebb-a162-81373cd02f98-logs\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.794455 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7459c264-36f9-4ebb-a162-81373cd02f98-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.798248 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/7459c264-36f9-4ebb-a162-81373cd02f98-ceph\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.798960 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-config-data\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.799709 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.800027 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-scripts\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.815764 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfznl\" (UniqueName: \"kubernetes.io/projected/7459c264-36f9-4ebb-a162-81373cd02f98-kube-api-access-pfznl\") pod \"glance-default-external-api-0\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " pod="openstack/glance-default-external-api-0" Jan 05 23:22:45 crc kubenswrapper[4910]: I0105 23:22:45.900339 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 23:22:46 crc kubenswrapper[4910]: I0105 23:22:46.476867 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="dc49cf3f-1ba4-419c-86f0-506b0001f341" containerName="glance-log" containerID="cri-o://e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf" gracePeriod=30 Jan 05 23:22:46 crc kubenswrapper[4910]: I0105 23:22:46.477336 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="dc49cf3f-1ba4-419c-86f0-506b0001f341" containerName="glance-httpd" containerID="cri-o://c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116" gracePeriod=30 Jan 05 23:22:46 crc kubenswrapper[4910]: I0105 23:22:46.510765 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 23:22:46 crc kubenswrapper[4910]: I0105 23:22:46.738512 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa11d5de-6574-433a-a046-0853d973066b" path="/var/lib/kubelet/pods/aa11d5de-6574-433a-a046-0853d973066b/volumes" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.326038 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.431728 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knt4z\" (UniqueName: \"kubernetes.io/projected/dc49cf3f-1ba4-419c-86f0-506b0001f341-kube-api-access-knt4z\") pod \"dc49cf3f-1ba4-419c-86f0-506b0001f341\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.431936 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/dc49cf3f-1ba4-419c-86f0-506b0001f341-ceph\") pod \"dc49cf3f-1ba4-419c-86f0-506b0001f341\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.432115 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-combined-ca-bundle\") pod \"dc49cf3f-1ba4-419c-86f0-506b0001f341\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.432164 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc49cf3f-1ba4-419c-86f0-506b0001f341-logs\") pod \"dc49cf3f-1ba4-419c-86f0-506b0001f341\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.432198 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-config-data\") pod \"dc49cf3f-1ba4-419c-86f0-506b0001f341\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.432309 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dc49cf3f-1ba4-419c-86f0-506b0001f341-httpd-run\") pod \"dc49cf3f-1ba4-419c-86f0-506b0001f341\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.432375 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-scripts\") pod \"dc49cf3f-1ba4-419c-86f0-506b0001f341\" (UID: \"dc49cf3f-1ba4-419c-86f0-506b0001f341\") " Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.432669 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc49cf3f-1ba4-419c-86f0-506b0001f341-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "dc49cf3f-1ba4-419c-86f0-506b0001f341" (UID: "dc49cf3f-1ba4-419c-86f0-506b0001f341"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.433146 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc49cf3f-1ba4-419c-86f0-506b0001f341-logs" (OuterVolumeSpecName: "logs") pod "dc49cf3f-1ba4-419c-86f0-506b0001f341" (UID: "dc49cf3f-1ba4-419c-86f0-506b0001f341"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.433556 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dc49cf3f-1ba4-419c-86f0-506b0001f341-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.433574 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dc49cf3f-1ba4-419c-86f0-506b0001f341-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.436306 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc49cf3f-1ba4-419c-86f0-506b0001f341-kube-api-access-knt4z" (OuterVolumeSpecName: "kube-api-access-knt4z") pod "dc49cf3f-1ba4-419c-86f0-506b0001f341" (UID: "dc49cf3f-1ba4-419c-86f0-506b0001f341"). InnerVolumeSpecName "kube-api-access-knt4z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.436480 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc49cf3f-1ba4-419c-86f0-506b0001f341-ceph" (OuterVolumeSpecName: "ceph") pod "dc49cf3f-1ba4-419c-86f0-506b0001f341" (UID: "dc49cf3f-1ba4-419c-86f0-506b0001f341"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.436628 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-scripts" (OuterVolumeSpecName: "scripts") pod "dc49cf3f-1ba4-419c-86f0-506b0001f341" (UID: "dc49cf3f-1ba4-419c-86f0-506b0001f341"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.454593 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dc49cf3f-1ba4-419c-86f0-506b0001f341" (UID: "dc49cf3f-1ba4-419c-86f0-506b0001f341"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.482319 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-config-data" (OuterVolumeSpecName: "config-data") pod "dc49cf3f-1ba4-419c-86f0-506b0001f341" (UID: "dc49cf3f-1ba4-419c-86f0-506b0001f341"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.489918 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7459c264-36f9-4ebb-a162-81373cd02f98","Type":"ContainerStarted","Data":"c5487ef6b746df81b6254d06d90f7b6250dd82f9749d5814dfa3283645590f5e"} Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.490760 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7459c264-36f9-4ebb-a162-81373cd02f98","Type":"ContainerStarted","Data":"ec5dcfb21704f4b1be94c5c8db9a13f4ef9b098f3186417dea7bae3702f871ef"} Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.493298 4910 generic.go:334] "Generic (PLEG): container finished" podID="dc49cf3f-1ba4-419c-86f0-506b0001f341" containerID="c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116" exitCode=0 Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.493340 4910 generic.go:334] "Generic (PLEG): container finished" podID="dc49cf3f-1ba4-419c-86f0-506b0001f341" containerID="e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf" exitCode=143 Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.493376 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dc49cf3f-1ba4-419c-86f0-506b0001f341","Type":"ContainerDied","Data":"c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116"} Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.493402 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.493421 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dc49cf3f-1ba4-419c-86f0-506b0001f341","Type":"ContainerDied","Data":"e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf"} Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.493436 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dc49cf3f-1ba4-419c-86f0-506b0001f341","Type":"ContainerDied","Data":"f90683aeeb53275737cc7a690b5ea1b13439e2688a4d9bdb892adf5cc02c4b85"} Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.493457 4910 scope.go:117] "RemoveContainer" containerID="c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.535785 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.535834 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.535854 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc49cf3f-1ba4-419c-86f0-506b0001f341-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.535879 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knt4z\" (UniqueName: \"kubernetes.io/projected/dc49cf3f-1ba4-419c-86f0-506b0001f341-kube-api-access-knt4z\") on node 
\"crc\" DevicePath \"\"" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.535898 4910 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/dc49cf3f-1ba4-419c-86f0-506b0001f341-ceph\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.546421 4910 scope.go:117] "RemoveContainer" containerID="e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.572086 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.587566 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.592987 4910 scope.go:117] "RemoveContainer" containerID="c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116" Jan 05 23:22:47 crc kubenswrapper[4910]: E0105 23:22:47.594837 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116\": container with ID starting with c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116 not found: ID does not exist" containerID="c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.594875 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116"} err="failed to get container status \"c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116\": rpc error: code = NotFound desc = could not find container \"c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116\": container with ID starting with c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116 not found: ID does not exist" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.594910 4910 scope.go:117] "RemoveContainer" containerID="e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf" Jan 05 23:22:47 crc kubenswrapper[4910]: E0105 23:22:47.595542 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf\": container with ID starting with e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf not found: ID does not exist" containerID="e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.595572 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf"} err="failed to get container status \"e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf\": rpc error: code = NotFound desc = could not find container \"e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf\": container with ID starting with e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf not found: ID does not exist" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.595592 4910 scope.go:117] "RemoveContainer" containerID="c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.596085 4910 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116"} err="failed to get container status \"c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116\": rpc error: code = NotFound desc = could not find container \"c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116\": container with ID starting with c8fdcd8725e763daac429ed2f4d8366a842d6a99a8136799c3c2ea897b300116 not found: ID does not exist" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.596215 4910 scope.go:117] "RemoveContainer" containerID="e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.600040 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 23:22:47 crc kubenswrapper[4910]: E0105 23:22:47.600450 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc49cf3f-1ba4-419c-86f0-506b0001f341" containerName="glance-httpd" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.600462 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc49cf3f-1ba4-419c-86f0-506b0001f341" containerName="glance-httpd" Jan 05 23:22:47 crc kubenswrapper[4910]: E0105 23:22:47.600476 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc49cf3f-1ba4-419c-86f0-506b0001f341" containerName="glance-log" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.600482 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc49cf3f-1ba4-419c-86f0-506b0001f341" containerName="glance-log" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.600678 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc49cf3f-1ba4-419c-86f0-506b0001f341" containerName="glance-log" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.600690 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc49cf3f-1ba4-419c-86f0-506b0001f341" containerName="glance-httpd" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.601613 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.602295 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf"} err="failed to get container status \"e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf\": rpc error: code = NotFound desc = could not find container \"e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf\": container with ID starting with e1672e9be51b76e794618e1859faf0b93f3dcd473e3c46211dc250781b61aedf not found: ID does not exist" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.608937 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.610562 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.751218 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f69cb6f3-1485-4413-81f5-4de7a3d72609-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.751644 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.751682 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdmr4\" (UniqueName: \"kubernetes.io/projected/f69cb6f3-1485-4413-81f5-4de7a3d72609-kube-api-access-xdmr4\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.752702 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f69cb6f3-1485-4413-81f5-4de7a3d72609-logs\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.752742 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f69cb6f3-1485-4413-81f5-4de7a3d72609-ceph\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.752832 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.752862 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.854130 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f69cb6f3-1485-4413-81f5-4de7a3d72609-logs\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.854187 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f69cb6f3-1485-4413-81f5-4de7a3d72609-ceph\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.854242 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.854263 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.854804 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f69cb6f3-1485-4413-81f5-4de7a3d72609-logs\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.854306 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f69cb6f3-1485-4413-81f5-4de7a3d72609-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.855345 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.855368 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdmr4\" (UniqueName: \"kubernetes.io/projected/f69cb6f3-1485-4413-81f5-4de7a3d72609-kube-api-access-xdmr4\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.855653 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/f69cb6f3-1485-4413-81f5-4de7a3d72609-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.858984 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.859272 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.859417 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f69cb6f3-1485-4413-81f5-4de7a3d72609-ceph\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.860393 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.872536 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdmr4\" (UniqueName: \"kubernetes.io/projected/f69cb6f3-1485-4413-81f5-4de7a3d72609-kube-api-access-xdmr4\") pod \"glance-default-internal-api-0\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:22:47 crc kubenswrapper[4910]: I0105 23:22:47.928829 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 23:22:48 crc kubenswrapper[4910]: I0105 23:22:48.502791 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7459c264-36f9-4ebb-a162-81373cd02f98","Type":"ContainerStarted","Data":"dc4dba090d7651413c055c546670049766a065a27e57e4aefcf6a51f23ebe82d"} Jan 05 23:22:48 crc kubenswrapper[4910]: I0105 23:22:48.534024 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.533997933 podStartE2EDuration="3.533997933s" podCreationTimestamp="2026-01-05 23:22:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:22:48.529849731 +0000 UTC m=+5500.107347421" watchObservedRunningTime="2026-01-05 23:22:48.533997933 +0000 UTC m=+5500.111495603" Jan 05 23:22:48 crc kubenswrapper[4910]: I0105 23:22:48.551474 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 23:22:48 crc kubenswrapper[4910]: W0105 23:22:48.561379 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf69cb6f3_1485_4413_81f5_4de7a3d72609.slice/crio-fcd222523118aee142990a36de6a0bc6d95a2e88e525012fe4354d258f05b815 WatchSource:0}: Error finding container fcd222523118aee142990a36de6a0bc6d95a2e88e525012fe4354d258f05b815: Status 404 returned error can't find the container with id fcd222523118aee142990a36de6a0bc6d95a2e88e525012fe4354d258f05b815 Jan 05 23:22:48 crc kubenswrapper[4910]: I0105 23:22:48.738068 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc49cf3f-1ba4-419c-86f0-506b0001f341" path="/var/lib/kubelet/pods/dc49cf3f-1ba4-419c-86f0-506b0001f341/volumes" Jan 05 23:22:49 crc kubenswrapper[4910]: I0105 23:22:49.532408 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f69cb6f3-1485-4413-81f5-4de7a3d72609","Type":"ContainerStarted","Data":"1a3e747a2e6d1e2dea580be497d7cb58b4675ce09d5d43e27b1d7425d2f89855"} Jan 05 23:22:49 crc kubenswrapper[4910]: I0105 23:22:49.532693 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f69cb6f3-1485-4413-81f5-4de7a3d72609","Type":"ContainerStarted","Data":"fcd222523118aee142990a36de6a0bc6d95a2e88e525012fe4354d258f05b815"} Jan 05 23:22:50 crc kubenswrapper[4910]: I0105 23:22:50.564062 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f69cb6f3-1485-4413-81f5-4de7a3d72609","Type":"ContainerStarted","Data":"831350d2b60a3338b1015b52db22974f9781c28f010ca524b149391e7ddbd02a"} Jan 05 23:22:50 crc kubenswrapper[4910]: I0105 23:22:50.620925 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.620893242 podStartE2EDuration="3.620893242s" podCreationTimestamp="2026-01-05 23:22:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:22:50.610032613 +0000 UTC m=+5502.187530323" watchObservedRunningTime="2026-01-05 23:22:50.620893242 +0000 UTC m=+5502.198390952" Jan 05 23:22:52 crc kubenswrapper[4910]: I0105 23:22:52.267428 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" Jan 05 23:22:52 crc kubenswrapper[4910]: I0105 23:22:52.419571 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c4486bb9f-r5zvb"] Jan 05 23:22:52 crc kubenswrapper[4910]: I0105 23:22:52.419896 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb" podUID="09af03d5-cfaf-400d-bfb2-bb08f1b57d45" containerName="dnsmasq-dns" containerID="cri-o://b2961f100b2611bbd0afb9773d4dd510a3e79c06e8b1197fe9dcd5009238aa05" gracePeriod=10 Jan 05 23:22:52 crc kubenswrapper[4910]: I0105 23:22:52.597741 4910 generic.go:334] "Generic (PLEG): container finished" podID="09af03d5-cfaf-400d-bfb2-bb08f1b57d45" containerID="b2961f100b2611bbd0afb9773d4dd510a3e79c06e8b1197fe9dcd5009238aa05" exitCode=0 Jan 05 23:22:52 crc kubenswrapper[4910]: I0105 23:22:52.597801 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb" event={"ID":"09af03d5-cfaf-400d-bfb2-bb08f1b57d45","Type":"ContainerDied","Data":"b2961f100b2611bbd0afb9773d4dd510a3e79c06e8b1197fe9dcd5009238aa05"} Jan 05 23:22:52 crc kubenswrapper[4910]: I0105 23:22:52.889264 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb" Jan 05 23:22:52 crc kubenswrapper[4910]: I0105 23:22:52.964519 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-config\") pod \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " Jan 05 23:22:52 crc kubenswrapper[4910]: I0105 23:22:52.964693 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2mwt\" (UniqueName: \"kubernetes.io/projected/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-kube-api-access-m2mwt\") pod \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " Jan 05 23:22:52 crc kubenswrapper[4910]: I0105 23:22:52.964731 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-dns-svc\") pod \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " Jan 05 23:22:52 crc kubenswrapper[4910]: I0105 23:22:52.964900 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-ovsdbserver-nb\") pod \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " Jan 05 23:22:52 crc kubenswrapper[4910]: I0105 23:22:52.964923 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-ovsdbserver-sb\") pod \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\" (UID: \"09af03d5-cfaf-400d-bfb2-bb08f1b57d45\") " Jan 05 23:22:52 crc kubenswrapper[4910]: I0105 23:22:52.971707 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-kube-api-access-m2mwt" (OuterVolumeSpecName: "kube-api-access-m2mwt") pod "09af03d5-cfaf-400d-bfb2-bb08f1b57d45" (UID: "09af03d5-cfaf-400d-bfb2-bb08f1b57d45"). InnerVolumeSpecName "kube-api-access-m2mwt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.010749 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "09af03d5-cfaf-400d-bfb2-bb08f1b57d45" (UID: "09af03d5-cfaf-400d-bfb2-bb08f1b57d45"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.022961 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "09af03d5-cfaf-400d-bfb2-bb08f1b57d45" (UID: "09af03d5-cfaf-400d-bfb2-bb08f1b57d45"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.023162 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "09af03d5-cfaf-400d-bfb2-bb08f1b57d45" (UID: "09af03d5-cfaf-400d-bfb2-bb08f1b57d45"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.047542 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-config" (OuterVolumeSpecName: "config") pod "09af03d5-cfaf-400d-bfb2-bb08f1b57d45" (UID: "09af03d5-cfaf-400d-bfb2-bb08f1b57d45"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.066513 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.066581 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.066596 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.066609 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2mwt\" (UniqueName: \"kubernetes.io/projected/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-kube-api-access-m2mwt\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.066623 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09af03d5-cfaf-400d-bfb2-bb08f1b57d45-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.613700 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb" event={"ID":"09af03d5-cfaf-400d-bfb2-bb08f1b57d45","Type":"ContainerDied","Data":"7e6e56372f6f668cd31373c5f401d689ed93db54e2163ae2a0dddff1748aa17d"} Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.613809 4910 scope.go:117] "RemoveContainer" 
containerID="b2961f100b2611bbd0afb9773d4dd510a3e79c06e8b1197fe9dcd5009238aa05" Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.614158 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c4486bb9f-r5zvb" Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.640306 4910 scope.go:117] "RemoveContainer" containerID="5f4fbc7211bda5403a5df8ad2b4263286f1d8b924076792a7f6a3b7759e1ea9b" Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.677543 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c4486bb9f-r5zvb"] Jan 05 23:22:53 crc kubenswrapper[4910]: I0105 23:22:53.684690 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c4486bb9f-r5zvb"] Jan 05 23:22:54 crc kubenswrapper[4910]: I0105 23:22:54.732982 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09af03d5-cfaf-400d-bfb2-bb08f1b57d45" path="/var/lib/kubelet/pods/09af03d5-cfaf-400d-bfb2-bb08f1b57d45/volumes" Jan 05 23:22:55 crc kubenswrapper[4910]: I0105 23:22:55.901032 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 05 23:22:55 crc kubenswrapper[4910]: I0105 23:22:55.901284 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 05 23:22:55 crc kubenswrapper[4910]: I0105 23:22:55.941655 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 05 23:22:55 crc kubenswrapper[4910]: I0105 23:22:55.983405 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 05 23:22:56 crc kubenswrapper[4910]: I0105 23:22:56.654248 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 05 23:22:56 crc kubenswrapper[4910]: I0105 23:22:56.654628 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 05 23:22:56 crc kubenswrapper[4910]: I0105 23:22:56.722201 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:22:56 crc kubenswrapper[4910]: E0105 23:22:56.722756 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:22:57 crc kubenswrapper[4910]: I0105 23:22:57.929256 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 05 23:22:57 crc kubenswrapper[4910]: I0105 23:22:57.933980 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 05 23:22:57 crc kubenswrapper[4910]: I0105 23:22:57.976023 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 05 23:22:57 crc kubenswrapper[4910]: I0105 23:22:57.992819 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 05 23:22:58 crc 
kubenswrapper[4910]: I0105 23:22:58.649218 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 05 23:22:58 crc kubenswrapper[4910]: I0105 23:22:58.676781 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 05 23:22:58 crc kubenswrapper[4910]: I0105 23:22:58.690524 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 05 23:22:58 crc kubenswrapper[4910]: I0105 23:22:58.694848 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 05 23:23:00 crc kubenswrapper[4910]: I0105 23:23:00.751066 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 05 23:23:00 crc kubenswrapper[4910]: I0105 23:23:00.751753 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 05 23:23:00 crc kubenswrapper[4910]: I0105 23:23:00.781305 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 05 23:23:00 crc kubenswrapper[4910]: I0105 23:23:00.928888 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.450345 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-5z8ph"] Jan 05 23:23:07 crc kubenswrapper[4910]: E0105 23:23:07.451346 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09af03d5-cfaf-400d-bfb2-bb08f1b57d45" containerName="init" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.451364 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="09af03d5-cfaf-400d-bfb2-bb08f1b57d45" containerName="init" Jan 05 23:23:07 crc kubenswrapper[4910]: E0105 23:23:07.451392 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09af03d5-cfaf-400d-bfb2-bb08f1b57d45" containerName="dnsmasq-dns" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.451399 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="09af03d5-cfaf-400d-bfb2-bb08f1b57d45" containerName="dnsmasq-dns" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.451574 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="09af03d5-cfaf-400d-bfb2-bb08f1b57d45" containerName="dnsmasq-dns" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.452347 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5z8ph" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.460213 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-5z8ph"] Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.507742 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-af75-account-create-update-w7xpn"] Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.509101 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-af75-account-create-update-w7xpn" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.517045 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.519205 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6679f87f-3fe1-413f-bf56-c5a2c56de981-operator-scripts\") pod \"placement-db-create-5z8ph\" (UID: \"6679f87f-3fe1-413f-bf56-c5a2c56de981\") " pod="openstack/placement-db-create-5z8ph" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.519252 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26hsf\" (UniqueName: \"kubernetes.io/projected/6679f87f-3fe1-413f-bf56-c5a2c56de981-kube-api-access-26hsf\") pod \"placement-db-create-5z8ph\" (UID: \"6679f87f-3fe1-413f-bf56-c5a2c56de981\") " pod="openstack/placement-db-create-5z8ph" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.531310 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-af75-account-create-update-w7xpn"] Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.621169 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6679f87f-3fe1-413f-bf56-c5a2c56de981-operator-scripts\") pod \"placement-db-create-5z8ph\" (UID: \"6679f87f-3fe1-413f-bf56-c5a2c56de981\") " pod="openstack/placement-db-create-5z8ph" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.621256 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26hsf\" (UniqueName: \"kubernetes.io/projected/6679f87f-3fe1-413f-bf56-c5a2c56de981-kube-api-access-26hsf\") pod \"placement-db-create-5z8ph\" (UID: \"6679f87f-3fe1-413f-bf56-c5a2c56de981\") " pod="openstack/placement-db-create-5z8ph" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.621324 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e381097e-e5fe-4f7c-b5da-afe0bab26a73-operator-scripts\") pod \"placement-af75-account-create-update-w7xpn\" (UID: \"e381097e-e5fe-4f7c-b5da-afe0bab26a73\") " pod="openstack/placement-af75-account-create-update-w7xpn" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.621397 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8n997\" (UniqueName: \"kubernetes.io/projected/e381097e-e5fe-4f7c-b5da-afe0bab26a73-kube-api-access-8n997\") pod \"placement-af75-account-create-update-w7xpn\" (UID: \"e381097e-e5fe-4f7c-b5da-afe0bab26a73\") " pod="openstack/placement-af75-account-create-update-w7xpn" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.622322 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6679f87f-3fe1-413f-bf56-c5a2c56de981-operator-scripts\") pod \"placement-db-create-5z8ph\" (UID: \"6679f87f-3fe1-413f-bf56-c5a2c56de981\") " pod="openstack/placement-db-create-5z8ph" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.643673 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26hsf\" (UniqueName: 
\"kubernetes.io/projected/6679f87f-3fe1-413f-bf56-c5a2c56de981-kube-api-access-26hsf\") pod \"placement-db-create-5z8ph\" (UID: \"6679f87f-3fe1-413f-bf56-c5a2c56de981\") " pod="openstack/placement-db-create-5z8ph" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.722393 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:23:07 crc kubenswrapper[4910]: E0105 23:23:07.723275 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.724487 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e381097e-e5fe-4f7c-b5da-afe0bab26a73-operator-scripts\") pod \"placement-af75-account-create-update-w7xpn\" (UID: \"e381097e-e5fe-4f7c-b5da-afe0bab26a73\") " pod="openstack/placement-af75-account-create-update-w7xpn" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.724603 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8n997\" (UniqueName: \"kubernetes.io/projected/e381097e-e5fe-4f7c-b5da-afe0bab26a73-kube-api-access-8n997\") pod \"placement-af75-account-create-update-w7xpn\" (UID: \"e381097e-e5fe-4f7c-b5da-afe0bab26a73\") " pod="openstack/placement-af75-account-create-update-w7xpn" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.726141 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e381097e-e5fe-4f7c-b5da-afe0bab26a73-operator-scripts\") pod \"placement-af75-account-create-update-w7xpn\" (UID: \"e381097e-e5fe-4f7c-b5da-afe0bab26a73\") " pod="openstack/placement-af75-account-create-update-w7xpn" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.751053 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8n997\" (UniqueName: \"kubernetes.io/projected/e381097e-e5fe-4f7c-b5da-afe0bab26a73-kube-api-access-8n997\") pod \"placement-af75-account-create-update-w7xpn\" (UID: \"e381097e-e5fe-4f7c-b5da-afe0bab26a73\") " pod="openstack/placement-af75-account-create-update-w7xpn" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.786205 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5z8ph" Jan 05 23:23:07 crc kubenswrapper[4910]: I0105 23:23:07.881139 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-af75-account-create-update-w7xpn" Jan 05 23:23:08 crc kubenswrapper[4910]: I0105 23:23:08.321504 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-5z8ph"] Jan 05 23:23:08 crc kubenswrapper[4910]: W0105 23:23:08.330103 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6679f87f_3fe1_413f_bf56_c5a2c56de981.slice/crio-d8d7c147e208965cdb91be0885cc9ccc8edce7993caea46c9f76bec93d716333 WatchSource:0}: Error finding container d8d7c147e208965cdb91be0885cc9ccc8edce7993caea46c9f76bec93d716333: Status 404 returned error can't find the container with id d8d7c147e208965cdb91be0885cc9ccc8edce7993caea46c9f76bec93d716333 Jan 05 23:23:08 crc kubenswrapper[4910]: I0105 23:23:08.436600 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-af75-account-create-update-w7xpn"] Jan 05 23:23:08 crc kubenswrapper[4910]: W0105 23:23:08.456104 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode381097e_e5fe_4f7c_b5da_afe0bab26a73.slice/crio-2d4a93c4a0b35c3aae358c8c84b4ea4431f5ff731ebca81f6ba5fcdabc3de0a4 WatchSource:0}: Error finding container 2d4a93c4a0b35c3aae358c8c84b4ea4431f5ff731ebca81f6ba5fcdabc3de0a4: Status 404 returned error can't find the container with id 2d4a93c4a0b35c3aae358c8c84b4ea4431f5ff731ebca81f6ba5fcdabc3de0a4 Jan 05 23:23:08 crc kubenswrapper[4910]: I0105 23:23:08.848094 4910 generic.go:334] "Generic (PLEG): container finished" podID="e381097e-e5fe-4f7c-b5da-afe0bab26a73" containerID="cdcd0efd55cb0eec9e739b179e9ad4dd3b4a5a19b1286d663c3a35f702514c63" exitCode=0 Jan 05 23:23:08 crc kubenswrapper[4910]: I0105 23:23:08.848172 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-af75-account-create-update-w7xpn" event={"ID":"e381097e-e5fe-4f7c-b5da-afe0bab26a73","Type":"ContainerDied","Data":"cdcd0efd55cb0eec9e739b179e9ad4dd3b4a5a19b1286d663c3a35f702514c63"} Jan 05 23:23:08 crc kubenswrapper[4910]: I0105 23:23:08.848516 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-af75-account-create-update-w7xpn" event={"ID":"e381097e-e5fe-4f7c-b5da-afe0bab26a73","Type":"ContainerStarted","Data":"2d4a93c4a0b35c3aae358c8c84b4ea4431f5ff731ebca81f6ba5fcdabc3de0a4"} Jan 05 23:23:08 crc kubenswrapper[4910]: I0105 23:23:08.851239 4910 generic.go:334] "Generic (PLEG): container finished" podID="6679f87f-3fe1-413f-bf56-c5a2c56de981" containerID="324dff769dd788f14462bf2394c802588cbe7f03e37ecca2dfd20adbd8cad442" exitCode=0 Jan 05 23:23:08 crc kubenswrapper[4910]: I0105 23:23:08.851307 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5z8ph" event={"ID":"6679f87f-3fe1-413f-bf56-c5a2c56de981","Type":"ContainerDied","Data":"324dff769dd788f14462bf2394c802588cbe7f03e37ecca2dfd20adbd8cad442"} Jan 05 23:23:08 crc kubenswrapper[4910]: I0105 23:23:08.851347 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5z8ph" event={"ID":"6679f87f-3fe1-413f-bf56-c5a2c56de981","Type":"ContainerStarted","Data":"d8d7c147e208965cdb91be0885cc9ccc8edce7993caea46c9f76bec93d716333"} Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.427091 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-af75-account-create-update-w7xpn" Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.432629 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5z8ph" Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.496657 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8n997\" (UniqueName: \"kubernetes.io/projected/e381097e-e5fe-4f7c-b5da-afe0bab26a73-kube-api-access-8n997\") pod \"e381097e-e5fe-4f7c-b5da-afe0bab26a73\" (UID: \"e381097e-e5fe-4f7c-b5da-afe0bab26a73\") " Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.496749 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6679f87f-3fe1-413f-bf56-c5a2c56de981-operator-scripts\") pod \"6679f87f-3fe1-413f-bf56-c5a2c56de981\" (UID: \"6679f87f-3fe1-413f-bf56-c5a2c56de981\") " Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.496806 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e381097e-e5fe-4f7c-b5da-afe0bab26a73-operator-scripts\") pod \"e381097e-e5fe-4f7c-b5da-afe0bab26a73\" (UID: \"e381097e-e5fe-4f7c-b5da-afe0bab26a73\") " Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.496914 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26hsf\" (UniqueName: \"kubernetes.io/projected/6679f87f-3fe1-413f-bf56-c5a2c56de981-kube-api-access-26hsf\") pod \"6679f87f-3fe1-413f-bf56-c5a2c56de981\" (UID: \"6679f87f-3fe1-413f-bf56-c5a2c56de981\") " Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.498943 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e381097e-e5fe-4f7c-b5da-afe0bab26a73-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e381097e-e5fe-4f7c-b5da-afe0bab26a73" (UID: "e381097e-e5fe-4f7c-b5da-afe0bab26a73"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.498975 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6679f87f-3fe1-413f-bf56-c5a2c56de981-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6679f87f-3fe1-413f-bf56-c5a2c56de981" (UID: "6679f87f-3fe1-413f-bf56-c5a2c56de981"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.506095 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e381097e-e5fe-4f7c-b5da-afe0bab26a73-kube-api-access-8n997" (OuterVolumeSpecName: "kube-api-access-8n997") pod "e381097e-e5fe-4f7c-b5da-afe0bab26a73" (UID: "e381097e-e5fe-4f7c-b5da-afe0bab26a73"). InnerVolumeSpecName "kube-api-access-8n997". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.510027 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6679f87f-3fe1-413f-bf56-c5a2c56de981-kube-api-access-26hsf" (OuterVolumeSpecName: "kube-api-access-26hsf") pod "6679f87f-3fe1-413f-bf56-c5a2c56de981" (UID: "6679f87f-3fe1-413f-bf56-c5a2c56de981"). InnerVolumeSpecName "kube-api-access-26hsf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.600236 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26hsf\" (UniqueName: \"kubernetes.io/projected/6679f87f-3fe1-413f-bf56-c5a2c56de981-kube-api-access-26hsf\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.600662 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8n997\" (UniqueName: \"kubernetes.io/projected/e381097e-e5fe-4f7c-b5da-afe0bab26a73-kube-api-access-8n997\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.600674 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6679f87f-3fe1-413f-bf56-c5a2c56de981-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.600684 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e381097e-e5fe-4f7c-b5da-afe0bab26a73-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.884887 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-af75-account-create-update-w7xpn" event={"ID":"e381097e-e5fe-4f7c-b5da-afe0bab26a73","Type":"ContainerDied","Data":"2d4a93c4a0b35c3aae358c8c84b4ea4431f5ff731ebca81f6ba5fcdabc3de0a4"} Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.884955 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d4a93c4a0b35c3aae358c8c84b4ea4431f5ff731ebca81f6ba5fcdabc3de0a4" Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.885020 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-af75-account-create-update-w7xpn" Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.890236 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5z8ph" event={"ID":"6679f87f-3fe1-413f-bf56-c5a2c56de981","Type":"ContainerDied","Data":"d8d7c147e208965cdb91be0885cc9ccc8edce7993caea46c9f76bec93d716333"} Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.890277 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8d7c147e208965cdb91be0885cc9ccc8edce7993caea46c9f76bec93d716333" Jan 05 23:23:10 crc kubenswrapper[4910]: I0105 23:23:10.890492 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-5z8ph" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.887710 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-dg9ps"] Jan 05 23:23:12 crc kubenswrapper[4910]: E0105 23:23:12.891015 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e381097e-e5fe-4f7c-b5da-afe0bab26a73" containerName="mariadb-account-create-update" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.891040 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e381097e-e5fe-4f7c-b5da-afe0bab26a73" containerName="mariadb-account-create-update" Jan 05 23:23:12 crc kubenswrapper[4910]: E0105 23:23:12.891084 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6679f87f-3fe1-413f-bf56-c5a2c56de981" containerName="mariadb-database-create" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.891091 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6679f87f-3fe1-413f-bf56-c5a2c56de981" containerName="mariadb-database-create" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.891280 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6679f87f-3fe1-413f-bf56-c5a2c56de981" containerName="mariadb-database-create" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.891306 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e381097e-e5fe-4f7c-b5da-afe0bab26a73" containerName="mariadb-account-create-update" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.892166 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.903611 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-zrlkp" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.903761 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.910152 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.950179 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6766f689d9-xljtt"] Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.968090 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.968852 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-combined-ca-bundle\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.972699 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-scripts\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.972839 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-config-data\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.973388 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7da2c266-02d4-4e31-b884-1d4b03678794-logs\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.973429 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5867k\" (UniqueName: \"kubernetes.io/projected/7da2c266-02d4-4e31-b884-1d4b03678794-kube-api-access-5867k\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.982569 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-dg9ps"] Jan 05 23:23:12 crc kubenswrapper[4910]: I0105 23:23:12.994624 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6766f689d9-xljtt"] Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.078800 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-config-data\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.078971 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7da2c266-02d4-4e31-b884-1d4b03678794-logs\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.079007 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-dns-svc\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.079029 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5867k\" (UniqueName: \"kubernetes.io/projected/7da2c266-02d4-4e31-b884-1d4b03678794-kube-api-access-5867k\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.079350 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-ovsdbserver-sb\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.079387 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-ovsdbserver-nb\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.079433 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-config\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.079592 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7da2c266-02d4-4e31-b884-1d4b03678794-logs\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.079610 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-combined-ca-bundle\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.080044 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-scripts\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.080097 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lwl7\" (UniqueName: \"kubernetes.io/projected/dac76acf-d1d0-40dd-9bba-53f9f52eb844-kube-api-access-5lwl7\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.085359 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-config-data\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.085550 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-scripts\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.086646 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-combined-ca-bundle\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.103477 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5867k\" (UniqueName: \"kubernetes.io/projected/7da2c266-02d4-4e31-b884-1d4b03678794-kube-api-access-5867k\") pod \"placement-db-sync-dg9ps\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.183393 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-ovsdbserver-sb\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.182340 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-ovsdbserver-sb\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.183493 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-ovsdbserver-nb\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.183536 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-config\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.183674 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5lwl7\" (UniqueName: \"kubernetes.io/projected/dac76acf-d1d0-40dd-9bba-53f9f52eb844-kube-api-access-5lwl7\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.183752 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-dns-svc\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.184536 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-dns-svc\") pod 
\"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.184768 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-config\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.185339 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-ovsdbserver-nb\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.227460 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5lwl7\" (UniqueName: \"kubernetes.io/projected/dac76acf-d1d0-40dd-9bba-53f9f52eb844-kube-api-access-5lwl7\") pod \"dnsmasq-dns-6766f689d9-xljtt\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.228169 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.296527 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.859545 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-dg9ps"] Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.945534 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-dg9ps" event={"ID":"7da2c266-02d4-4e31-b884-1d4b03678794","Type":"ContainerStarted","Data":"d90a86ee20842ccfdacf9382719e4bedde344ab9f06d9e6fdd14e4599dea8d1e"} Jan 05 23:23:13 crc kubenswrapper[4910]: I0105 23:23:13.995030 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6766f689d9-xljtt"] Jan 05 23:23:14 crc kubenswrapper[4910]: W0105 23:23:14.005368 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddac76acf_d1d0_40dd_9bba_53f9f52eb844.slice/crio-24d2f353d1bd894faa6ff15ec98b05e00aa8b938b9ac784113013fd83221c8af WatchSource:0}: Error finding container 24d2f353d1bd894faa6ff15ec98b05e00aa8b938b9ac784113013fd83221c8af: Status 404 returned error can't find the container with id 24d2f353d1bd894faa6ff15ec98b05e00aa8b938b9ac784113013fd83221c8af Jan 05 23:23:14 crc kubenswrapper[4910]: I0105 23:23:14.961904 4910 generic.go:334] "Generic (PLEG): container finished" podID="dac76acf-d1d0-40dd-9bba-53f9f52eb844" containerID="346b9b475d5cf4120380530cad52e8cd8d2af07fef23afed40e7089da2fe9a94" exitCode=0 Jan 05 23:23:14 crc kubenswrapper[4910]: I0105 23:23:14.962140 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6766f689d9-xljtt" event={"ID":"dac76acf-d1d0-40dd-9bba-53f9f52eb844","Type":"ContainerDied","Data":"346b9b475d5cf4120380530cad52e8cd8d2af07fef23afed40e7089da2fe9a94"} Jan 05 23:23:14 crc kubenswrapper[4910]: I0105 23:23:14.962397 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6766f689d9-xljtt" 
event={"ID":"dac76acf-d1d0-40dd-9bba-53f9f52eb844","Type":"ContainerStarted","Data":"24d2f353d1bd894faa6ff15ec98b05e00aa8b938b9ac784113013fd83221c8af"} Jan 05 23:23:14 crc kubenswrapper[4910]: I0105 23:23:14.966461 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-dg9ps" event={"ID":"7da2c266-02d4-4e31-b884-1d4b03678794","Type":"ContainerStarted","Data":"96ecb3fd5b09d81e4a23dd75ad64c9af703b5ea4de235b53ba5c17e3d2d9d334"} Jan 05 23:23:15 crc kubenswrapper[4910]: I0105 23:23:15.981490 4910 generic.go:334] "Generic (PLEG): container finished" podID="7da2c266-02d4-4e31-b884-1d4b03678794" containerID="96ecb3fd5b09d81e4a23dd75ad64c9af703b5ea4de235b53ba5c17e3d2d9d334" exitCode=0 Jan 05 23:23:15 crc kubenswrapper[4910]: I0105 23:23:15.981609 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-dg9ps" event={"ID":"7da2c266-02d4-4e31-b884-1d4b03678794","Type":"ContainerDied","Data":"96ecb3fd5b09d81e4a23dd75ad64c9af703b5ea4de235b53ba5c17e3d2d9d334"} Jan 05 23:23:15 crc kubenswrapper[4910]: I0105 23:23:15.987358 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6766f689d9-xljtt" event={"ID":"dac76acf-d1d0-40dd-9bba-53f9f52eb844","Type":"ContainerStarted","Data":"dda3c8d467b38cffda17383a9c2affbda4b9b11be91f4b73324ddfa487968cd7"} Jan 05 23:23:15 crc kubenswrapper[4910]: I0105 23:23:15.987578 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:16 crc kubenswrapper[4910]: I0105 23:23:16.090713 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6766f689d9-xljtt" podStartSLOduration=4.090682101 podStartE2EDuration="4.090682101s" podCreationTimestamp="2026-01-05 23:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:23:16.053936101 +0000 UTC m=+5527.631433811" watchObservedRunningTime="2026-01-05 23:23:16.090682101 +0000 UTC m=+5527.668179781" Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.380295 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.479237 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-combined-ca-bundle\") pod \"7da2c266-02d4-4e31-b884-1d4b03678794\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.479311 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-scripts\") pod \"7da2c266-02d4-4e31-b884-1d4b03678794\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.479354 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7da2c266-02d4-4e31-b884-1d4b03678794-logs\") pod \"7da2c266-02d4-4e31-b884-1d4b03678794\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.479381 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5867k\" (UniqueName: \"kubernetes.io/projected/7da2c266-02d4-4e31-b884-1d4b03678794-kube-api-access-5867k\") pod \"7da2c266-02d4-4e31-b884-1d4b03678794\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.479433 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-config-data\") pod \"7da2c266-02d4-4e31-b884-1d4b03678794\" (UID: \"7da2c266-02d4-4e31-b884-1d4b03678794\") " Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.480073 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7da2c266-02d4-4e31-b884-1d4b03678794-logs" (OuterVolumeSpecName: "logs") pod "7da2c266-02d4-4e31-b884-1d4b03678794" (UID: "7da2c266-02d4-4e31-b884-1d4b03678794"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.486488 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7da2c266-02d4-4e31-b884-1d4b03678794-kube-api-access-5867k" (OuterVolumeSpecName: "kube-api-access-5867k") pod "7da2c266-02d4-4e31-b884-1d4b03678794" (UID: "7da2c266-02d4-4e31-b884-1d4b03678794"). InnerVolumeSpecName "kube-api-access-5867k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.486642 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-scripts" (OuterVolumeSpecName: "scripts") pod "7da2c266-02d4-4e31-b884-1d4b03678794" (UID: "7da2c266-02d4-4e31-b884-1d4b03678794"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.512206 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7da2c266-02d4-4e31-b884-1d4b03678794" (UID: "7da2c266-02d4-4e31-b884-1d4b03678794"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.528607 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-config-data" (OuterVolumeSpecName: "config-data") pod "7da2c266-02d4-4e31-b884-1d4b03678794" (UID: "7da2c266-02d4-4e31-b884-1d4b03678794"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.582465 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.582540 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.582566 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7da2c266-02d4-4e31-b884-1d4b03678794-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.582592 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5867k\" (UniqueName: \"kubernetes.io/projected/7da2c266-02d4-4e31-b884-1d4b03678794-kube-api-access-5867k\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:17 crc kubenswrapper[4910]: I0105 23:23:17.582622 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da2c266-02d4-4e31-b884-1d4b03678794-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.010369 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-dg9ps" event={"ID":"7da2c266-02d4-4e31-b884-1d4b03678794","Type":"ContainerDied","Data":"d90a86ee20842ccfdacf9382719e4bedde344ab9f06d9e6fdd14e4599dea8d1e"} Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.010762 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d90a86ee20842ccfdacf9382719e4bedde344ab9f06d9e6fdd14e4599dea8d1e" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.010422 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-dg9ps" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.514444 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-77c85cc8c4-khhdx"] Jan 05 23:23:18 crc kubenswrapper[4910]: E0105 23:23:18.516197 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7da2c266-02d4-4e31-b884-1d4b03678794" containerName="placement-db-sync" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.516463 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7da2c266-02d4-4e31-b884-1d4b03678794" containerName="placement-db-sync" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.516906 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="7da2c266-02d4-4e31-b884-1d4b03678794" containerName="placement-db-sync" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.518429 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.521838 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-zrlkp" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.521992 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.524230 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.541867 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-77c85cc8c4-khhdx"] Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.601194 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/564d3691-3354-45ec-b2b7-29413b00f611-scripts\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.601269 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/564d3691-3354-45ec-b2b7-29413b00f611-logs\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.601328 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/564d3691-3354-45ec-b2b7-29413b00f611-config-data\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.601381 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svlf4\" (UniqueName: \"kubernetes.io/projected/564d3691-3354-45ec-b2b7-29413b00f611-kube-api-access-svlf4\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.601407 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/564d3691-3354-45ec-b2b7-29413b00f611-combined-ca-bundle\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.703449 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/564d3691-3354-45ec-b2b7-29413b00f611-config-data\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.703538 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svlf4\" (UniqueName: \"kubernetes.io/projected/564d3691-3354-45ec-b2b7-29413b00f611-kube-api-access-svlf4\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 
23:23:18.703568 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/564d3691-3354-45ec-b2b7-29413b00f611-combined-ca-bundle\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.703651 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/564d3691-3354-45ec-b2b7-29413b00f611-scripts\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.703700 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/564d3691-3354-45ec-b2b7-29413b00f611-logs\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.704331 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/564d3691-3354-45ec-b2b7-29413b00f611-logs\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.714115 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/564d3691-3354-45ec-b2b7-29413b00f611-scripts\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.717738 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/564d3691-3354-45ec-b2b7-29413b00f611-config-data\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.718019 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/564d3691-3354-45ec-b2b7-29413b00f611-combined-ca-bundle\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.729112 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svlf4\" (UniqueName: \"kubernetes.io/projected/564d3691-3354-45ec-b2b7-29413b00f611-kube-api-access-svlf4\") pod \"placement-77c85cc8c4-khhdx\" (UID: \"564d3691-3354-45ec-b2b7-29413b00f611\") " pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:18 crc kubenswrapper[4910]: I0105 23:23:18.854600 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:19 crc kubenswrapper[4910]: I0105 23:23:19.390166 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-77c85cc8c4-khhdx"] Jan 05 23:23:19 crc kubenswrapper[4910]: I0105 23:23:19.721622 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:23:19 crc kubenswrapper[4910]: E0105 23:23:19.721867 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:23:20 crc kubenswrapper[4910]: I0105 23:23:20.053024 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-77c85cc8c4-khhdx" event={"ID":"564d3691-3354-45ec-b2b7-29413b00f611","Type":"ContainerStarted","Data":"cef8df2d2ef1d868e7600e56d8a7d8e16909da6142919c0e738a49a372cd0e73"} Jan 05 23:23:20 crc kubenswrapper[4910]: I0105 23:23:20.053762 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-77c85cc8c4-khhdx" event={"ID":"564d3691-3354-45ec-b2b7-29413b00f611","Type":"ContainerStarted","Data":"49ba44484a5b4f7fdfac482637ed45be787a77dfb0b9ca8c28578be6ba9652c4"} Jan 05 23:23:20 crc kubenswrapper[4910]: I0105 23:23:20.053802 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-77c85cc8c4-khhdx" event={"ID":"564d3691-3354-45ec-b2b7-29413b00f611","Type":"ContainerStarted","Data":"f203ba1f81c2118de5bc0fc2a8e49def81f35e8aa2bb767f61cded5174d10cc0"} Jan 05 23:23:20 crc kubenswrapper[4910]: I0105 23:23:20.053837 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:20 crc kubenswrapper[4910]: I0105 23:23:20.053862 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:20 crc kubenswrapper[4910]: I0105 23:23:20.095288 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-77c85cc8c4-khhdx" podStartSLOduration=2.095249526 podStartE2EDuration="2.095249526s" podCreationTimestamp="2026-01-05 23:23:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:23:20.079432524 +0000 UTC m=+5531.656930234" watchObservedRunningTime="2026-01-05 23:23:20.095249526 +0000 UTC m=+5531.672747206" Jan 05 23:23:23 crc kubenswrapper[4910]: I0105 23:23:23.300379 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:23:23 crc kubenswrapper[4910]: I0105 23:23:23.382894 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bf548fdbf-ns22l"] Jan 05 23:23:23 crc kubenswrapper[4910]: I0105 23:23:23.383216 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" podUID="f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" containerName="dnsmasq-dns" containerID="cri-o://36c8bbc9d52d78a938cdc8e2ae378726984edc58bdf0c119319758644c294da5" gracePeriod=10 Jan 05 23:23:23 crc kubenswrapper[4910]: I0105 23:23:23.921452 4910 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.026737 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bw6dj\" (UniqueName: \"kubernetes.io/projected/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-kube-api-access-bw6dj\") pod \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.026814 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-config\") pod \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.026925 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-dns-svc\") pod \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.026941 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-ovsdbserver-sb\") pod \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.027017 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-ovsdbserver-nb\") pod \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\" (UID: \"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9\") " Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.049629 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-kube-api-access-bw6dj" (OuterVolumeSpecName: "kube-api-access-bw6dj") pod "f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" (UID: "f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9"). InnerVolumeSpecName "kube-api-access-bw6dj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.072935 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" (UID: "f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.076003 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-config" (OuterVolumeSpecName: "config") pod "f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" (UID: "f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.083376 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" (UID: "f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.085250 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" (UID: "f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.100813 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.100839 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" event={"ID":"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9","Type":"ContainerDied","Data":"36c8bbc9d52d78a938cdc8e2ae378726984edc58bdf0c119319758644c294da5"} Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.100914 4910 scope.go:117] "RemoveContainer" containerID="36c8bbc9d52d78a938cdc8e2ae378726984edc58bdf0c119319758644c294da5" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.102054 4910 generic.go:334] "Generic (PLEG): container finished" podID="f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" containerID="36c8bbc9d52d78a938cdc8e2ae378726984edc58bdf0c119319758644c294da5" exitCode=0 Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.102140 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf548fdbf-ns22l" event={"ID":"f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9","Type":"ContainerDied","Data":"604f8a36a8d62465e8cb4738dc71606d68a363c654d71bc73a6743b974ed093e"} Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.129349 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bw6dj\" (UniqueName: \"kubernetes.io/projected/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-kube-api-access-bw6dj\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.129392 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.129405 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.129418 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.129431 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.146310 4910 scope.go:117] "RemoveContainer" containerID="fb44cbb288aa4a3310274b97bfb71f7326bda9a92406e3fa2a83d33247a4c43b" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.153206 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bf548fdbf-ns22l"] Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.172667 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/dnsmasq-dns-bf548fdbf-ns22l"] Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.174442 4910 scope.go:117] "RemoveContainer" containerID="36c8bbc9d52d78a938cdc8e2ae378726984edc58bdf0c119319758644c294da5" Jan 05 23:23:24 crc kubenswrapper[4910]: E0105 23:23:24.175306 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36c8bbc9d52d78a938cdc8e2ae378726984edc58bdf0c119319758644c294da5\": container with ID starting with 36c8bbc9d52d78a938cdc8e2ae378726984edc58bdf0c119319758644c294da5 not found: ID does not exist" containerID="36c8bbc9d52d78a938cdc8e2ae378726984edc58bdf0c119319758644c294da5" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.175352 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36c8bbc9d52d78a938cdc8e2ae378726984edc58bdf0c119319758644c294da5"} err="failed to get container status \"36c8bbc9d52d78a938cdc8e2ae378726984edc58bdf0c119319758644c294da5\": rpc error: code = NotFound desc = could not find container \"36c8bbc9d52d78a938cdc8e2ae378726984edc58bdf0c119319758644c294da5\": container with ID starting with 36c8bbc9d52d78a938cdc8e2ae378726984edc58bdf0c119319758644c294da5 not found: ID does not exist" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.175384 4910 scope.go:117] "RemoveContainer" containerID="fb44cbb288aa4a3310274b97bfb71f7326bda9a92406e3fa2a83d33247a4c43b" Jan 05 23:23:24 crc kubenswrapper[4910]: E0105 23:23:24.176077 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb44cbb288aa4a3310274b97bfb71f7326bda9a92406e3fa2a83d33247a4c43b\": container with ID starting with fb44cbb288aa4a3310274b97bfb71f7326bda9a92406e3fa2a83d33247a4c43b not found: ID does not exist" containerID="fb44cbb288aa4a3310274b97bfb71f7326bda9a92406e3fa2a83d33247a4c43b" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.176096 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb44cbb288aa4a3310274b97bfb71f7326bda9a92406e3fa2a83d33247a4c43b"} err="failed to get container status \"fb44cbb288aa4a3310274b97bfb71f7326bda9a92406e3fa2a83d33247a4c43b\": rpc error: code = NotFound desc = could not find container \"fb44cbb288aa4a3310274b97bfb71f7326bda9a92406e3fa2a83d33247a4c43b\": container with ID starting with fb44cbb288aa4a3310274b97bfb71f7326bda9a92406e3fa2a83d33247a4c43b not found: ID does not exist" Jan 05 23:23:24 crc kubenswrapper[4910]: I0105 23:23:24.734754 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" path="/var/lib/kubelet/pods/f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9/volumes" Jan 05 23:23:32 crc kubenswrapper[4910]: I0105 23:23:32.720982 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:23:32 crc kubenswrapper[4910]: E0105 23:23:32.721728 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:23:45 crc kubenswrapper[4910]: I0105 23:23:45.410205 4910 scope.go:117] "RemoveContainer" 
containerID="51d83ea2b54372b0736ddc27ea1d88509b3cf17f1e4fe5c287c70d72e4a57d21" Jan 05 23:23:47 crc kubenswrapper[4910]: I0105 23:23:47.722272 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:23:47 crc kubenswrapper[4910]: E0105 23:23:47.723356 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:23:49 crc kubenswrapper[4910]: I0105 23:23:49.932965 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:49 crc kubenswrapper[4910]: I0105 23:23:49.952386 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-77c85cc8c4-khhdx" Jan 05 23:23:58 crc kubenswrapper[4910]: I0105 23:23:58.728111 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:23:58 crc kubenswrapper[4910]: E0105 23:23:58.729435 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:24:09 crc kubenswrapper[4910]: I0105 23:24:09.724950 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:24:09 crc kubenswrapper[4910]: E0105 23:24:09.726508 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.640593 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-2dz4h"] Jan 05 23:24:15 crc kubenswrapper[4910]: E0105 23:24:15.641743 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" containerName="init" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.641760 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" containerName="init" Jan 05 23:24:15 crc kubenswrapper[4910]: E0105 23:24:15.641784 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" containerName="dnsmasq-dns" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.641794 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" containerName="dnsmasq-dns" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.642042 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1a998d7-e5b3-4bf3-9690-dd73f6bef4a9" 
containerName="dnsmasq-dns" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.642799 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2dz4h" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.655983 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-2dz4h"] Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.727747 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee-operator-scripts\") pod \"nova-api-db-create-2dz4h\" (UID: \"5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee\") " pod="openstack/nova-api-db-create-2dz4h" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.728073 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbqj9\" (UniqueName: \"kubernetes.io/projected/5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee-kube-api-access-bbqj9\") pod \"nova-api-db-create-2dz4h\" (UID: \"5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee\") " pod="openstack/nova-api-db-create-2dz4h" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.739573 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-jtx2j"] Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.740782 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-jtx2j" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.748915 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-jtx2j"] Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.830003 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee-operator-scripts\") pod \"nova-api-db-create-2dz4h\" (UID: \"5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee\") " pod="openstack/nova-api-db-create-2dz4h" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.830089 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbqj9\" (UniqueName: \"kubernetes.io/projected/5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee-kube-api-access-bbqj9\") pod \"nova-api-db-create-2dz4h\" (UID: \"5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee\") " pod="openstack/nova-api-db-create-2dz4h" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.831170 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee-operator-scripts\") pod \"nova-api-db-create-2dz4h\" (UID: \"5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee\") " pod="openstack/nova-api-db-create-2dz4h" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.831413 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77czx\" (UniqueName: \"kubernetes.io/projected/d8dfb60d-eef4-4e99-abda-f6a7de62bf6d-kube-api-access-77czx\") pod \"nova-cell0-db-create-jtx2j\" (UID: \"d8dfb60d-eef4-4e99-abda-f6a7de62bf6d\") " pod="openstack/nova-cell0-db-create-jtx2j" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.832028 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8dfb60d-eef4-4e99-abda-f6a7de62bf6d-operator-scripts\") pod 
\"nova-cell0-db-create-jtx2j\" (UID: \"d8dfb60d-eef4-4e99-abda-f6a7de62bf6d\") " pod="openstack/nova-cell0-db-create-jtx2j" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.844975 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-1d78-account-create-update-pxgb2"] Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.846543 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-1d78-account-create-update-pxgb2" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.849686 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.854609 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-1d78-account-create-update-pxgb2"] Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.856350 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbqj9\" (UniqueName: \"kubernetes.io/projected/5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee-kube-api-access-bbqj9\") pod \"nova-api-db-create-2dz4h\" (UID: \"5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee\") " pod="openstack/nova-api-db-create-2dz4h" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.932108 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-2fjzc"] Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.934405 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e95d5f5d-4fb4-47c9-ab6e-93d93949ead2-operator-scripts\") pod \"nova-api-1d78-account-create-update-pxgb2\" (UID: \"e95d5f5d-4fb4-47c9-ab6e-93d93949ead2\") " pod="openstack/nova-api-1d78-account-create-update-pxgb2" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.934479 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77czx\" (UniqueName: \"kubernetes.io/projected/d8dfb60d-eef4-4e99-abda-f6a7de62bf6d-kube-api-access-77czx\") pod \"nova-cell0-db-create-jtx2j\" (UID: \"d8dfb60d-eef4-4e99-abda-f6a7de62bf6d\") " pod="openstack/nova-cell0-db-create-jtx2j" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.934525 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfhw4\" (UniqueName: \"kubernetes.io/projected/e95d5f5d-4fb4-47c9-ab6e-93d93949ead2-kube-api-access-lfhw4\") pod \"nova-api-1d78-account-create-update-pxgb2\" (UID: \"e95d5f5d-4fb4-47c9-ab6e-93d93949ead2\") " pod="openstack/nova-api-1d78-account-create-update-pxgb2" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.934562 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8dfb60d-eef4-4e99-abda-f6a7de62bf6d-operator-scripts\") pod \"nova-cell0-db-create-jtx2j\" (UID: \"d8dfb60d-eef4-4e99-abda-f6a7de62bf6d\") " pod="openstack/nova-cell0-db-create-jtx2j" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.935722 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8dfb60d-eef4-4e99-abda-f6a7de62bf6d-operator-scripts\") pod \"nova-cell0-db-create-jtx2j\" (UID: \"d8dfb60d-eef4-4e99-abda-f6a7de62bf6d\") " pod="openstack/nova-cell0-db-create-jtx2j" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.935733 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-2fjzc" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.945618 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-2fjzc"] Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.962337 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2dz4h" Jan 05 23:24:15 crc kubenswrapper[4910]: I0105 23:24:15.964960 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77czx\" (UniqueName: \"kubernetes.io/projected/d8dfb60d-eef4-4e99-abda-f6a7de62bf6d-kube-api-access-77czx\") pod \"nova-cell0-db-create-jtx2j\" (UID: \"d8dfb60d-eef4-4e99-abda-f6a7de62bf6d\") " pod="openstack/nova-cell0-db-create-jtx2j" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.038827 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfhw4\" (UniqueName: \"kubernetes.io/projected/e95d5f5d-4fb4-47c9-ab6e-93d93949ead2-kube-api-access-lfhw4\") pod \"nova-api-1d78-account-create-update-pxgb2\" (UID: \"e95d5f5d-4fb4-47c9-ab6e-93d93949ead2\") " pod="openstack/nova-api-1d78-account-create-update-pxgb2" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.045412 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7fec6e1-d1a5-49fb-afe2-36b2965a2049-operator-scripts\") pod \"nova-cell1-db-create-2fjzc\" (UID: \"e7fec6e1-d1a5-49fb-afe2-36b2965a2049\") " pod="openstack/nova-cell1-db-create-2fjzc" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.045925 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xshw\" (UniqueName: \"kubernetes.io/projected/e7fec6e1-d1a5-49fb-afe2-36b2965a2049-kube-api-access-8xshw\") pod \"nova-cell1-db-create-2fjzc\" (UID: \"e7fec6e1-d1a5-49fb-afe2-36b2965a2049\") " pod="openstack/nova-cell1-db-create-2fjzc" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.045988 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e95d5f5d-4fb4-47c9-ab6e-93d93949ead2-operator-scripts\") pod \"nova-api-1d78-account-create-update-pxgb2\" (UID: \"e95d5f5d-4fb4-47c9-ab6e-93d93949ead2\") " pod="openstack/nova-api-1d78-account-create-update-pxgb2" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.047246 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e95d5f5d-4fb4-47c9-ab6e-93d93949ead2-operator-scripts\") pod \"nova-api-1d78-account-create-update-pxgb2\" (UID: \"e95d5f5d-4fb4-47c9-ab6e-93d93949ead2\") " pod="openstack/nova-api-1d78-account-create-update-pxgb2" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.060142 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-jtx2j" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.067640 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-53ac-account-create-update-pllbn"] Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.070029 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-53ac-account-create-update-pllbn" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.075439 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfhw4\" (UniqueName: \"kubernetes.io/projected/e95d5f5d-4fb4-47c9-ab6e-93d93949ead2-kube-api-access-lfhw4\") pod \"nova-api-1d78-account-create-update-pxgb2\" (UID: \"e95d5f5d-4fb4-47c9-ab6e-93d93949ead2\") " pod="openstack/nova-api-1d78-account-create-update-pxgb2" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.093948 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.126506 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-53ac-account-create-update-pllbn"] Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.150778 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7fec6e1-d1a5-49fb-afe2-36b2965a2049-operator-scripts\") pod \"nova-cell1-db-create-2fjzc\" (UID: \"e7fec6e1-d1a5-49fb-afe2-36b2965a2049\") " pod="openstack/nova-cell1-db-create-2fjzc" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.151217 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjgrk\" (UniqueName: \"kubernetes.io/projected/531f78d8-c1f4-44f8-873a-0808f37b1dce-kube-api-access-gjgrk\") pod \"nova-cell0-53ac-account-create-update-pllbn\" (UID: \"531f78d8-c1f4-44f8-873a-0808f37b1dce\") " pod="openstack/nova-cell0-53ac-account-create-update-pllbn" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.151325 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/531f78d8-c1f4-44f8-873a-0808f37b1dce-operator-scripts\") pod \"nova-cell0-53ac-account-create-update-pllbn\" (UID: \"531f78d8-c1f4-44f8-873a-0808f37b1dce\") " pod="openstack/nova-cell0-53ac-account-create-update-pllbn" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.151408 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xshw\" (UniqueName: \"kubernetes.io/projected/e7fec6e1-d1a5-49fb-afe2-36b2965a2049-kube-api-access-8xshw\") pod \"nova-cell1-db-create-2fjzc\" (UID: \"e7fec6e1-d1a5-49fb-afe2-36b2965a2049\") " pod="openstack/nova-cell1-db-create-2fjzc" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.152004 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7fec6e1-d1a5-49fb-afe2-36b2965a2049-operator-scripts\") pod \"nova-cell1-db-create-2fjzc\" (UID: \"e7fec6e1-d1a5-49fb-afe2-36b2965a2049\") " pod="openstack/nova-cell1-db-create-2fjzc" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.180556 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xshw\" (UniqueName: \"kubernetes.io/projected/e7fec6e1-d1a5-49fb-afe2-36b2965a2049-kube-api-access-8xshw\") pod \"nova-cell1-db-create-2fjzc\" (UID: \"e7fec6e1-d1a5-49fb-afe2-36b2965a2049\") " pod="openstack/nova-cell1-db-create-2fjzc" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.198083 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-1d78-account-create-update-pxgb2" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.252688 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-2fjzc" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.253312 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjgrk\" (UniqueName: \"kubernetes.io/projected/531f78d8-c1f4-44f8-873a-0808f37b1dce-kube-api-access-gjgrk\") pod \"nova-cell0-53ac-account-create-update-pllbn\" (UID: \"531f78d8-c1f4-44f8-873a-0808f37b1dce\") " pod="openstack/nova-cell0-53ac-account-create-update-pllbn" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.253383 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/531f78d8-c1f4-44f8-873a-0808f37b1dce-operator-scripts\") pod \"nova-cell0-53ac-account-create-update-pllbn\" (UID: \"531f78d8-c1f4-44f8-873a-0808f37b1dce\") " pod="openstack/nova-cell0-53ac-account-create-update-pllbn" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.254141 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/531f78d8-c1f4-44f8-873a-0808f37b1dce-operator-scripts\") pod \"nova-cell0-53ac-account-create-update-pllbn\" (UID: \"531f78d8-c1f4-44f8-873a-0808f37b1dce\") " pod="openstack/nova-cell0-53ac-account-create-update-pllbn" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.271988 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjgrk\" (UniqueName: \"kubernetes.io/projected/531f78d8-c1f4-44f8-873a-0808f37b1dce-kube-api-access-gjgrk\") pod \"nova-cell0-53ac-account-create-update-pllbn\" (UID: \"531f78d8-c1f4-44f8-873a-0808f37b1dce\") " pod="openstack/nova-cell0-53ac-account-create-update-pllbn" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.353079 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-9041-account-create-update-kjg9f"] Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.354636 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-9041-account-create-update-kjg9f" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.357034 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.365444 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-9041-account-create-update-kjg9f"] Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.429885 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-53ac-account-create-update-pllbn" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.459292 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvwtn\" (UniqueName: \"kubernetes.io/projected/19df1da5-624a-4a0c-a21f-51ffcfb74941-kube-api-access-tvwtn\") pod \"nova-cell1-9041-account-create-update-kjg9f\" (UID: \"19df1da5-624a-4a0c-a21f-51ffcfb74941\") " pod="openstack/nova-cell1-9041-account-create-update-kjg9f" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.459356 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19df1da5-624a-4a0c-a21f-51ffcfb74941-operator-scripts\") pod \"nova-cell1-9041-account-create-update-kjg9f\" (UID: \"19df1da5-624a-4a0c-a21f-51ffcfb74941\") " pod="openstack/nova-cell1-9041-account-create-update-kjg9f" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.537751 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-2dz4h"] Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.561410 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvwtn\" (UniqueName: \"kubernetes.io/projected/19df1da5-624a-4a0c-a21f-51ffcfb74941-kube-api-access-tvwtn\") pod \"nova-cell1-9041-account-create-update-kjg9f\" (UID: \"19df1da5-624a-4a0c-a21f-51ffcfb74941\") " pod="openstack/nova-cell1-9041-account-create-update-kjg9f" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.561463 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19df1da5-624a-4a0c-a21f-51ffcfb74941-operator-scripts\") pod \"nova-cell1-9041-account-create-update-kjg9f\" (UID: \"19df1da5-624a-4a0c-a21f-51ffcfb74941\") " pod="openstack/nova-cell1-9041-account-create-update-kjg9f" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.562532 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19df1da5-624a-4a0c-a21f-51ffcfb74941-operator-scripts\") pod \"nova-cell1-9041-account-create-update-kjg9f\" (UID: \"19df1da5-624a-4a0c-a21f-51ffcfb74941\") " pod="openstack/nova-cell1-9041-account-create-update-kjg9f" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.582319 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvwtn\" (UniqueName: \"kubernetes.io/projected/19df1da5-624a-4a0c-a21f-51ffcfb74941-kube-api-access-tvwtn\") pod \"nova-cell1-9041-account-create-update-kjg9f\" (UID: \"19df1da5-624a-4a0c-a21f-51ffcfb74941\") " pod="openstack/nova-cell1-9041-account-create-update-kjg9f" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.623364 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-jtx2j"] Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.687586 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-9041-account-create-update-kjg9f" Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.707111 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-jtx2j" event={"ID":"d8dfb60d-eef4-4e99-abda-f6a7de62bf6d","Type":"ContainerStarted","Data":"3fd3120ee94ef7915e49e6e28159a5a106626cf06ffd31c050b53ae758c45d75"} Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.708907 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-2dz4h" event={"ID":"5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee","Type":"ContainerStarted","Data":"2fe3d98fc1bd6e6531c24fa5515ea4e405f6d735ffa4c0cade3d995bc836b6d4"} Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.758855 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-1d78-account-create-update-pxgb2"] Jan 05 23:24:16 crc kubenswrapper[4910]: W0105 23:24:16.761134 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode95d5f5d_4fb4_47c9_ab6e_93d93949ead2.slice/crio-1cb0cd97433ecc00fc738b2662ef8c1e1eba2e9a86e044f358d1760dedbb759b WatchSource:0}: Error finding container 1cb0cd97433ecc00fc738b2662ef8c1e1eba2e9a86e044f358d1760dedbb759b: Status 404 returned error can't find the container with id 1cb0cd97433ecc00fc738b2662ef8c1e1eba2e9a86e044f358d1760dedbb759b Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.791822 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-53ac-account-create-update-pllbn"] Jan 05 23:24:16 crc kubenswrapper[4910]: I0105 23:24:16.855377 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-2fjzc"] Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.037151 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-9041-account-create-update-kjg9f"] Jan 05 23:24:17 crc kubenswrapper[4910]: W0105 23:24:17.082527 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19df1da5_624a_4a0c_a21f_51ffcfb74941.slice/crio-c4731e12aa96fdb20ed50aa968d607e4a03fe04f5fd58056e83d52166854b860 WatchSource:0}: Error finding container c4731e12aa96fdb20ed50aa968d607e4a03fe04f5fd58056e83d52166854b860: Status 404 returned error can't find the container with id c4731e12aa96fdb20ed50aa968d607e4a03fe04f5fd58056e83d52166854b860 Jan 05 23:24:17 crc kubenswrapper[4910]: E0105 23:24:17.333715 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode95d5f5d_4fb4_47c9_ab6e_93d93949ead2.slice/crio-c947c2a056ee6f7110a8c07485a5dcb761bff4a7ae92b78f95e34531366f2aaa.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode95d5f5d_4fb4_47c9_ab6e_93d93949ead2.slice/crio-conmon-c947c2a056ee6f7110a8c07485a5dcb761bff4a7ae92b78f95e34531366f2aaa.scope\": RecentStats: unable to find data in memory cache]" Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.724243 4910 generic.go:334] "Generic (PLEG): container finished" podID="5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee" containerID="412790b1c5b4cbd811e41a4cf74d8b0ce15938128188cd6dbed27b1e74e7d589" exitCode=0 Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.724331 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-db-create-2dz4h" event={"ID":"5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee","Type":"ContainerDied","Data":"412790b1c5b4cbd811e41a4cf74d8b0ce15938128188cd6dbed27b1e74e7d589"} Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.727300 4910 generic.go:334] "Generic (PLEG): container finished" podID="531f78d8-c1f4-44f8-873a-0808f37b1dce" containerID="f0b1375ad573ba32072197e2efbf5d0227f025dd09b18f47a2f36ca927bd609b" exitCode=0 Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.727341 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-53ac-account-create-update-pllbn" event={"ID":"531f78d8-c1f4-44f8-873a-0808f37b1dce","Type":"ContainerDied","Data":"f0b1375ad573ba32072197e2efbf5d0227f025dd09b18f47a2f36ca927bd609b"} Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.727359 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-53ac-account-create-update-pllbn" event={"ID":"531f78d8-c1f4-44f8-873a-0808f37b1dce","Type":"ContainerStarted","Data":"ec48a3c0c791fdbe627b1a9c954703f78c07a201428cf4fb0ef5bdc4e679f808"} Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.731569 4910 generic.go:334] "Generic (PLEG): container finished" podID="19df1da5-624a-4a0c-a21f-51ffcfb74941" containerID="8e37bac4e5fde22fcb4959bed1467d329ca4527e73c3b3986bd31942a9439093" exitCode=0 Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.731675 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9041-account-create-update-kjg9f" event={"ID":"19df1da5-624a-4a0c-a21f-51ffcfb74941","Type":"ContainerDied","Data":"8e37bac4e5fde22fcb4959bed1467d329ca4527e73c3b3986bd31942a9439093"} Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.731715 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9041-account-create-update-kjg9f" event={"ID":"19df1da5-624a-4a0c-a21f-51ffcfb74941","Type":"ContainerStarted","Data":"c4731e12aa96fdb20ed50aa968d607e4a03fe04f5fd58056e83d52166854b860"} Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.734507 4910 generic.go:334] "Generic (PLEG): container finished" podID="e95d5f5d-4fb4-47c9-ab6e-93d93949ead2" containerID="c947c2a056ee6f7110a8c07485a5dcb761bff4a7ae92b78f95e34531366f2aaa" exitCode=0 Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.734560 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-1d78-account-create-update-pxgb2" event={"ID":"e95d5f5d-4fb4-47c9-ab6e-93d93949ead2","Type":"ContainerDied","Data":"c947c2a056ee6f7110a8c07485a5dcb761bff4a7ae92b78f95e34531366f2aaa"} Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.734606 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-1d78-account-create-update-pxgb2" event={"ID":"e95d5f5d-4fb4-47c9-ab6e-93d93949ead2","Type":"ContainerStarted","Data":"1cb0cd97433ecc00fc738b2662ef8c1e1eba2e9a86e044f358d1760dedbb759b"} Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.738649 4910 generic.go:334] "Generic (PLEG): container finished" podID="d8dfb60d-eef4-4e99-abda-f6a7de62bf6d" containerID="ad6f4b7dacbc8ca936f715152f6fb568538c029cf558061706a109206cbb2cd1" exitCode=0 Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.738745 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-jtx2j" event={"ID":"d8dfb60d-eef4-4e99-abda-f6a7de62bf6d","Type":"ContainerDied","Data":"ad6f4b7dacbc8ca936f715152f6fb568538c029cf558061706a109206cbb2cd1"} Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.741996 
4910 generic.go:334] "Generic (PLEG): container finished" podID="e7fec6e1-d1a5-49fb-afe2-36b2965a2049" containerID="35f32c545e0ce39a7424bf29abc17ae9bc44202aae5316fe585acab249b0b55e" exitCode=0 Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.742036 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-2fjzc" event={"ID":"e7fec6e1-d1a5-49fb-afe2-36b2965a2049","Type":"ContainerDied","Data":"35f32c545e0ce39a7424bf29abc17ae9bc44202aae5316fe585acab249b0b55e"} Jan 05 23:24:17 crc kubenswrapper[4910]: I0105 23:24:17.742055 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-2fjzc" event={"ID":"e7fec6e1-d1a5-49fb-afe2-36b2965a2049","Type":"ContainerStarted","Data":"ea615f00d866e3202cf8da80c7e998992dcdec0506e41404d722aedf561bbe6b"} Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.247302 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-9041-account-create-update-kjg9f" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.332764 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19df1da5-624a-4a0c-a21f-51ffcfb74941-operator-scripts\") pod \"19df1da5-624a-4a0c-a21f-51ffcfb74941\" (UID: \"19df1da5-624a-4a0c-a21f-51ffcfb74941\") " Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.332906 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvwtn\" (UniqueName: \"kubernetes.io/projected/19df1da5-624a-4a0c-a21f-51ffcfb74941-kube-api-access-tvwtn\") pod \"19df1da5-624a-4a0c-a21f-51ffcfb74941\" (UID: \"19df1da5-624a-4a0c-a21f-51ffcfb74941\") " Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.333953 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19df1da5-624a-4a0c-a21f-51ffcfb74941-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "19df1da5-624a-4a0c-a21f-51ffcfb74941" (UID: "19df1da5-624a-4a0c-a21f-51ffcfb74941"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.355393 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19df1da5-624a-4a0c-a21f-51ffcfb74941-kube-api-access-tvwtn" (OuterVolumeSpecName: "kube-api-access-tvwtn") pod "19df1da5-624a-4a0c-a21f-51ffcfb74941" (UID: "19df1da5-624a-4a0c-a21f-51ffcfb74941"). InnerVolumeSpecName "kube-api-access-tvwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.435329 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19df1da5-624a-4a0c-a21f-51ffcfb74941-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.435378 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvwtn\" (UniqueName: \"kubernetes.io/projected/19df1da5-624a-4a0c-a21f-51ffcfb74941-kube-api-access-tvwtn\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.492332 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-jtx2j" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.500587 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-2fjzc" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.514168 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-1d78-account-create-update-pxgb2" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.517940 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2dz4h" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.521320 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-53ac-account-create-update-pllbn" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.540000 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjgrk\" (UniqueName: \"kubernetes.io/projected/531f78d8-c1f4-44f8-873a-0808f37b1dce-kube-api-access-gjgrk\") pod \"531f78d8-c1f4-44f8-873a-0808f37b1dce\" (UID: \"531f78d8-c1f4-44f8-873a-0808f37b1dce\") " Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.540057 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbqj9\" (UniqueName: \"kubernetes.io/projected/5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee-kube-api-access-bbqj9\") pod \"5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee\" (UID: \"5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee\") " Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.540086 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lfhw4\" (UniqueName: \"kubernetes.io/projected/e95d5f5d-4fb4-47c9-ab6e-93d93949ead2-kube-api-access-lfhw4\") pod \"e95d5f5d-4fb4-47c9-ab6e-93d93949ead2\" (UID: \"e95d5f5d-4fb4-47c9-ab6e-93d93949ead2\") " Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.540479 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/531f78d8-c1f4-44f8-873a-0808f37b1dce-operator-scripts\") pod \"531f78d8-c1f4-44f8-873a-0808f37b1dce\" (UID: \"531f78d8-c1f4-44f8-873a-0808f37b1dce\") " Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.540731 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8dfb60d-eef4-4e99-abda-f6a7de62bf6d-operator-scripts\") pod \"d8dfb60d-eef4-4e99-abda-f6a7de62bf6d\" (UID: \"d8dfb60d-eef4-4e99-abda-f6a7de62bf6d\") " Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.541067 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/531f78d8-c1f4-44f8-873a-0808f37b1dce-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "531f78d8-c1f4-44f8-873a-0808f37b1dce" (UID: "531f78d8-c1f4-44f8-873a-0808f37b1dce"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.541845 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8dfb60d-eef4-4e99-abda-f6a7de62bf6d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d8dfb60d-eef4-4e99-abda-f6a7de62bf6d" (UID: "d8dfb60d-eef4-4e99-abda-f6a7de62bf6d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.541935 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xshw\" (UniqueName: \"kubernetes.io/projected/e7fec6e1-d1a5-49fb-afe2-36b2965a2049-kube-api-access-8xshw\") pod \"e7fec6e1-d1a5-49fb-afe2-36b2965a2049\" (UID: \"e7fec6e1-d1a5-49fb-afe2-36b2965a2049\") " Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.542109 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e95d5f5d-4fb4-47c9-ab6e-93d93949ead2-operator-scripts\") pod \"e95d5f5d-4fb4-47c9-ab6e-93d93949ead2\" (UID: \"e95d5f5d-4fb4-47c9-ab6e-93d93949ead2\") " Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.542657 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e95d5f5d-4fb4-47c9-ab6e-93d93949ead2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e95d5f5d-4fb4-47c9-ab6e-93d93949ead2" (UID: "e95d5f5d-4fb4-47c9-ab6e-93d93949ead2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.542791 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77czx\" (UniqueName: \"kubernetes.io/projected/d8dfb60d-eef4-4e99-abda-f6a7de62bf6d-kube-api-access-77czx\") pod \"d8dfb60d-eef4-4e99-abda-f6a7de62bf6d\" (UID: \"d8dfb60d-eef4-4e99-abda-f6a7de62bf6d\") " Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.543344 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee-operator-scripts\") pod \"5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee\" (UID: \"5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee\") " Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.544087 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7fec6e1-d1a5-49fb-afe2-36b2965a2049-operator-scripts\") pod \"e7fec6e1-d1a5-49fb-afe2-36b2965a2049\" (UID: \"e7fec6e1-d1a5-49fb-afe2-36b2965a2049\") " Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.543929 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee" (UID: "5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.544616 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7fec6e1-d1a5-49fb-afe2-36b2965a2049-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e7fec6e1-d1a5-49fb-afe2-36b2965a2049" (UID: "e7fec6e1-d1a5-49fb-afe2-36b2965a2049"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.545595 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7fec6e1-d1a5-49fb-afe2-36b2965a2049-kube-api-access-8xshw" (OuterVolumeSpecName: "kube-api-access-8xshw") pod "e7fec6e1-d1a5-49fb-afe2-36b2965a2049" (UID: "e7fec6e1-d1a5-49fb-afe2-36b2965a2049"). InnerVolumeSpecName "kube-api-access-8xshw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.546257 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e95d5f5d-4fb4-47c9-ab6e-93d93949ead2-kube-api-access-lfhw4" (OuterVolumeSpecName: "kube-api-access-lfhw4") pod "e95d5f5d-4fb4-47c9-ab6e-93d93949ead2" (UID: "e95d5f5d-4fb4-47c9-ab6e-93d93949ead2"). InnerVolumeSpecName "kube-api-access-lfhw4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.546663 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/531f78d8-c1f4-44f8-873a-0808f37b1dce-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.546711 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8dfb60d-eef4-4e99-abda-f6a7de62bf6d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.546726 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xshw\" (UniqueName: \"kubernetes.io/projected/e7fec6e1-d1a5-49fb-afe2-36b2965a2049-kube-api-access-8xshw\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.546740 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e95d5f5d-4fb4-47c9-ab6e-93d93949ead2-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.546752 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.546762 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7fec6e1-d1a5-49fb-afe2-36b2965a2049-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.546791 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lfhw4\" (UniqueName: \"kubernetes.io/projected/e95d5f5d-4fb4-47c9-ab6e-93d93949ead2-kube-api-access-lfhw4\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.547661 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8dfb60d-eef4-4e99-abda-f6a7de62bf6d-kube-api-access-77czx" (OuterVolumeSpecName: "kube-api-access-77czx") pod "d8dfb60d-eef4-4e99-abda-f6a7de62bf6d" (UID: "d8dfb60d-eef4-4e99-abda-f6a7de62bf6d"). InnerVolumeSpecName "kube-api-access-77czx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.549620 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/531f78d8-c1f4-44f8-873a-0808f37b1dce-kube-api-access-gjgrk" (OuterVolumeSpecName: "kube-api-access-gjgrk") pod "531f78d8-c1f4-44f8-873a-0808f37b1dce" (UID: "531f78d8-c1f4-44f8-873a-0808f37b1dce"). InnerVolumeSpecName "kube-api-access-gjgrk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.571864 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee-kube-api-access-bbqj9" (OuterVolumeSpecName: "kube-api-access-bbqj9") pod "5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee" (UID: "5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee"). InnerVolumeSpecName "kube-api-access-bbqj9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.649022 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77czx\" (UniqueName: \"kubernetes.io/projected/d8dfb60d-eef4-4e99-abda-f6a7de62bf6d-kube-api-access-77czx\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.649063 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjgrk\" (UniqueName: \"kubernetes.io/projected/531f78d8-c1f4-44f8-873a-0808f37b1dce-kube-api-access-gjgrk\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.649073 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbqj9\" (UniqueName: \"kubernetes.io/projected/5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee-kube-api-access-bbqj9\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.768220 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-1d78-account-create-update-pxgb2" event={"ID":"e95d5f5d-4fb4-47c9-ab6e-93d93949ead2","Type":"ContainerDied","Data":"1cb0cd97433ecc00fc738b2662ef8c1e1eba2e9a86e044f358d1760dedbb759b"} Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.768276 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-1d78-account-create-update-pxgb2" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.768289 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1cb0cd97433ecc00fc738b2662ef8c1e1eba2e9a86e044f358d1760dedbb759b" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.770504 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-jtx2j" event={"ID":"d8dfb60d-eef4-4e99-abda-f6a7de62bf6d","Type":"ContainerDied","Data":"3fd3120ee94ef7915e49e6e28159a5a106626cf06ffd31c050b53ae758c45d75"} Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.770525 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-jtx2j" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.770530 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3fd3120ee94ef7915e49e6e28159a5a106626cf06ffd31c050b53ae758c45d75" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.772247 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-2fjzc" event={"ID":"e7fec6e1-d1a5-49fb-afe2-36b2965a2049","Type":"ContainerDied","Data":"ea615f00d866e3202cf8da80c7e998992dcdec0506e41404d722aedf561bbe6b"} Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.772271 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea615f00d866e3202cf8da80c7e998992dcdec0506e41404d722aedf561bbe6b" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.772331 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-2fjzc" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.779334 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2dz4h" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.779353 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-2dz4h" event={"ID":"5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee","Type":"ContainerDied","Data":"2fe3d98fc1bd6e6531c24fa5515ea4e405f6d735ffa4c0cade3d995bc836b6d4"} Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.779399 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2fe3d98fc1bd6e6531c24fa5515ea4e405f6d735ffa4c0cade3d995bc836b6d4" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.782158 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-53ac-account-create-update-pllbn" event={"ID":"531f78d8-c1f4-44f8-873a-0808f37b1dce","Type":"ContainerDied","Data":"ec48a3c0c791fdbe627b1a9c954703f78c07a201428cf4fb0ef5bdc4e679f808"} Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.782203 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-53ac-account-create-update-pllbn" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.782230 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec48a3c0c791fdbe627b1a9c954703f78c07a201428cf4fb0ef5bdc4e679f808" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.784686 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9041-account-create-update-kjg9f" event={"ID":"19df1da5-624a-4a0c-a21f-51ffcfb74941","Type":"ContainerDied","Data":"c4731e12aa96fdb20ed50aa968d607e4a03fe04f5fd58056e83d52166854b860"} Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.784713 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c4731e12aa96fdb20ed50aa968d607e4a03fe04f5fd58056e83d52166854b860" Jan 05 23:24:19 crc kubenswrapper[4910]: I0105 23:24:19.784795 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-9041-account-create-update-kjg9f" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.392188 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qqqc6"] Jan 05 23:24:21 crc kubenswrapper[4910]: E0105 23:24:21.394517 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee" containerName="mariadb-database-create" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.394695 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee" containerName="mariadb-database-create" Jan 05 23:24:21 crc kubenswrapper[4910]: E0105 23:24:21.394813 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="531f78d8-c1f4-44f8-873a-0808f37b1dce" containerName="mariadb-account-create-update" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.394909 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="531f78d8-c1f4-44f8-873a-0808f37b1dce" containerName="mariadb-account-create-update" Jan 05 23:24:21 crc kubenswrapper[4910]: E0105 23:24:21.395015 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19df1da5-624a-4a0c-a21f-51ffcfb74941" containerName="mariadb-account-create-update" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.395153 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="19df1da5-624a-4a0c-a21f-51ffcfb74941" containerName="mariadb-account-create-update" Jan 05 23:24:21 crc kubenswrapper[4910]: E0105 23:24:21.395298 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8dfb60d-eef4-4e99-abda-f6a7de62bf6d" containerName="mariadb-database-create" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.395430 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8dfb60d-eef4-4e99-abda-f6a7de62bf6d" containerName="mariadb-database-create" Jan 05 23:24:21 crc kubenswrapper[4910]: E0105 23:24:21.395586 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7fec6e1-d1a5-49fb-afe2-36b2965a2049" containerName="mariadb-database-create" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.395702 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7fec6e1-d1a5-49fb-afe2-36b2965a2049" containerName="mariadb-database-create" Jan 05 23:24:21 crc kubenswrapper[4910]: E0105 23:24:21.395823 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e95d5f5d-4fb4-47c9-ab6e-93d93949ead2" containerName="mariadb-account-create-update" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.395966 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e95d5f5d-4fb4-47c9-ab6e-93d93949ead2" containerName="mariadb-account-create-update" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.396365 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8dfb60d-eef4-4e99-abda-f6a7de62bf6d" containerName="mariadb-database-create" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.396505 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="531f78d8-c1f4-44f8-873a-0808f37b1dce" containerName="mariadb-account-create-update" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.396633 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="19df1da5-624a-4a0c-a21f-51ffcfb74941" containerName="mariadb-account-create-update" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.396778 4910 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee" containerName="mariadb-database-create" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.396903 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7fec6e1-d1a5-49fb-afe2-36b2965a2049" containerName="mariadb-database-create" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.397013 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e95d5f5d-4fb4-47c9-ab6e-93d93949ead2" containerName="mariadb-account-create-update" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.425094 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.436401 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.436756 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.437619 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-glfgv" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.480973 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qqqc6"] Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.491411 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-scripts\") pod \"nova-cell0-conductor-db-sync-qqqc6\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.491583 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-qqqc6\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.491847 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-config-data\") pod \"nova-cell0-conductor-db-sync-qqqc6\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.491894 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkvjt\" (UniqueName: \"kubernetes.io/projected/a3f6562b-e771-4c65-a192-d4ac8412ab54-kube-api-access-bkvjt\") pod \"nova-cell0-conductor-db-sync-qqqc6\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.594688 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-config-data\") pod \"nova-cell0-conductor-db-sync-qqqc6\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.594764 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-bkvjt\" (UniqueName: \"kubernetes.io/projected/a3f6562b-e771-4c65-a192-d4ac8412ab54-kube-api-access-bkvjt\") pod \"nova-cell0-conductor-db-sync-qqqc6\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.594848 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-scripts\") pod \"nova-cell0-conductor-db-sync-qqqc6\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.594933 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-qqqc6\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.600660 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-config-data\") pod \"nova-cell0-conductor-db-sync-qqqc6\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.605686 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-scripts\") pod \"nova-cell0-conductor-db-sync-qqqc6\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.609966 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-qqqc6\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.613749 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkvjt\" (UniqueName: \"kubernetes.io/projected/a3f6562b-e771-4c65-a192-d4ac8412ab54-kube-api-access-bkvjt\") pod \"nova-cell0-conductor-db-sync-qqqc6\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:21 crc kubenswrapper[4910]: I0105 23:24:21.747548 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:22 crc kubenswrapper[4910]: I0105 23:24:22.281089 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qqqc6"] Jan 05 23:24:22 crc kubenswrapper[4910]: W0105 23:24:22.293388 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3f6562b_e771_4c65_a192_d4ac8412ab54.slice/crio-9408bf3a1d4730048b7016106e4da8dfb04ee3771a29021f00691850f6604e0f WatchSource:0}: Error finding container 9408bf3a1d4730048b7016106e4da8dfb04ee3771a29021f00691850f6604e0f: Status 404 returned error can't find the container with id 9408bf3a1d4730048b7016106e4da8dfb04ee3771a29021f00691850f6604e0f Jan 05 23:24:22 crc kubenswrapper[4910]: I0105 23:24:22.827429 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qqqc6" event={"ID":"a3f6562b-e771-4c65-a192-d4ac8412ab54","Type":"ContainerStarted","Data":"073856c0d67c155e57154f42bd7c12e2c86e471b010f40848bf1ebb6ae9e9e30"} Jan 05 23:24:22 crc kubenswrapper[4910]: I0105 23:24:22.829529 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qqqc6" event={"ID":"a3f6562b-e771-4c65-a192-d4ac8412ab54","Type":"ContainerStarted","Data":"9408bf3a1d4730048b7016106e4da8dfb04ee3771a29021f00691850f6604e0f"} Jan 05 23:24:22 crc kubenswrapper[4910]: I0105 23:24:22.845915 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-qqqc6" podStartSLOduration=1.845895272 podStartE2EDuration="1.845895272s" podCreationTimestamp="2026-01-05 23:24:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:24:22.840445367 +0000 UTC m=+5594.417943037" watchObservedRunningTime="2026-01-05 23:24:22.845895272 +0000 UTC m=+5594.423392932" Jan 05 23:24:23 crc kubenswrapper[4910]: I0105 23:24:23.722893 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:24:24 crc kubenswrapper[4910]: I0105 23:24:24.857244 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"0e240e4effc2bd679e0f96fec5bc054d5530ae8a8dd2bd9c82e2bc521473387b"} Jan 05 23:24:28 crc kubenswrapper[4910]: I0105 23:24:28.903679 4910 generic.go:334] "Generic (PLEG): container finished" podID="a3f6562b-e771-4c65-a192-d4ac8412ab54" containerID="073856c0d67c155e57154f42bd7c12e2c86e471b010f40848bf1ebb6ae9e9e30" exitCode=0 Jan 05 23:24:28 crc kubenswrapper[4910]: I0105 23:24:28.903758 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qqqc6" event={"ID":"a3f6562b-e771-4c65-a192-d4ac8412ab54","Type":"ContainerDied","Data":"073856c0d67c155e57154f42bd7c12e2c86e471b010f40848bf1ebb6ae9e9e30"} Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.385824 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.401764 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-config-data\") pod \"a3f6562b-e771-4c65-a192-d4ac8412ab54\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.402091 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-combined-ca-bundle\") pod \"a3f6562b-e771-4c65-a192-d4ac8412ab54\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.402405 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-scripts\") pod \"a3f6562b-e771-4c65-a192-d4ac8412ab54\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.402492 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bkvjt\" (UniqueName: \"kubernetes.io/projected/a3f6562b-e771-4c65-a192-d4ac8412ab54-kube-api-access-bkvjt\") pod \"a3f6562b-e771-4c65-a192-d4ac8412ab54\" (UID: \"a3f6562b-e771-4c65-a192-d4ac8412ab54\") " Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.415277 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3f6562b-e771-4c65-a192-d4ac8412ab54-kube-api-access-bkvjt" (OuterVolumeSpecName: "kube-api-access-bkvjt") pod "a3f6562b-e771-4c65-a192-d4ac8412ab54" (UID: "a3f6562b-e771-4c65-a192-d4ac8412ab54"). InnerVolumeSpecName "kube-api-access-bkvjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.415935 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-scripts" (OuterVolumeSpecName: "scripts") pod "a3f6562b-e771-4c65-a192-d4ac8412ab54" (UID: "a3f6562b-e771-4c65-a192-d4ac8412ab54"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.444858 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a3f6562b-e771-4c65-a192-d4ac8412ab54" (UID: "a3f6562b-e771-4c65-a192-d4ac8412ab54"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.454960 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-config-data" (OuterVolumeSpecName: "config-data") pod "a3f6562b-e771-4c65-a192-d4ac8412ab54" (UID: "a3f6562b-e771-4c65-a192-d4ac8412ab54"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.511629 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.511687 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.511783 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bkvjt\" (UniqueName: \"kubernetes.io/projected/a3f6562b-e771-4c65-a192-d4ac8412ab54-kube-api-access-bkvjt\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.511803 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3f6562b-e771-4c65-a192-d4ac8412ab54-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.931355 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qqqc6" event={"ID":"a3f6562b-e771-4c65-a192-d4ac8412ab54","Type":"ContainerDied","Data":"9408bf3a1d4730048b7016106e4da8dfb04ee3771a29021f00691850f6604e0f"} Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.931846 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9408bf3a1d4730048b7016106e4da8dfb04ee3771a29021f00691850f6604e0f" Jan 05 23:24:30 crc kubenswrapper[4910]: I0105 23:24:30.931456 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qqqc6" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.052673 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 23:24:31 crc kubenswrapper[4910]: E0105 23:24:31.053451 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3f6562b-e771-4c65-a192-d4ac8412ab54" containerName="nova-cell0-conductor-db-sync" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.053485 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3f6562b-e771-4c65-a192-d4ac8412ab54" containerName="nova-cell0-conductor-db-sync" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.053916 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3f6562b-e771-4c65-a192-d4ac8412ab54" containerName="nova-cell0-conductor-db-sync" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.055178 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.058225 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-glfgv" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.059075 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.069101 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.128256 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vmvt\" (UniqueName: \"kubernetes.io/projected/9875106d-cbf9-402b-b7c1-0c3d00445606-kube-api-access-5vmvt\") pod \"nova-cell0-conductor-0\" (UID: \"9875106d-cbf9-402b-b7c1-0c3d00445606\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.128400 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9875106d-cbf9-402b-b7c1-0c3d00445606-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"9875106d-cbf9-402b-b7c1-0c3d00445606\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.128500 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9875106d-cbf9-402b-b7c1-0c3d00445606-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"9875106d-cbf9-402b-b7c1-0c3d00445606\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.230765 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vmvt\" (UniqueName: \"kubernetes.io/projected/9875106d-cbf9-402b-b7c1-0c3d00445606-kube-api-access-5vmvt\") pod \"nova-cell0-conductor-0\" (UID: \"9875106d-cbf9-402b-b7c1-0c3d00445606\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.230880 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9875106d-cbf9-402b-b7c1-0c3d00445606-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"9875106d-cbf9-402b-b7c1-0c3d00445606\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.230969 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9875106d-cbf9-402b-b7c1-0c3d00445606-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"9875106d-cbf9-402b-b7c1-0c3d00445606\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.236618 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9875106d-cbf9-402b-b7c1-0c3d00445606-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"9875106d-cbf9-402b-b7c1-0c3d00445606\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.245374 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9875106d-cbf9-402b-b7c1-0c3d00445606-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"9875106d-cbf9-402b-b7c1-0c3d00445606\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.251272 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vmvt\" (UniqueName: \"kubernetes.io/projected/9875106d-cbf9-402b-b7c1-0c3d00445606-kube-api-access-5vmvt\") pod \"nova-cell0-conductor-0\" (UID: \"9875106d-cbf9-402b-b7c1-0c3d00445606\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:24:31 crc kubenswrapper[4910]: I0105 23:24:31.380211 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 05 23:24:32 crc kubenswrapper[4910]: I0105 23:24:32.687306 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 23:24:32 crc kubenswrapper[4910]: W0105 23:24:32.694032 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9875106d_cbf9_402b_b7c1_0c3d00445606.slice/crio-c9ffdda37517b1da6427794deebd7cf45deb559d1704f699a353655e2cf05609 WatchSource:0}: Error finding container c9ffdda37517b1da6427794deebd7cf45deb559d1704f699a353655e2cf05609: Status 404 returned error can't find the container with id c9ffdda37517b1da6427794deebd7cf45deb559d1704f699a353655e2cf05609 Jan 05 23:24:32 crc kubenswrapper[4910]: I0105 23:24:32.956658 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"9875106d-cbf9-402b-b7c1-0c3d00445606","Type":"ContainerStarted","Data":"1312808f6d5b1a456e98d8336d8d347e1eea4c15ed28035eb42e1dd2f964718d"} Jan 05 23:24:32 crc kubenswrapper[4910]: I0105 23:24:32.957254 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 05 23:24:32 crc kubenswrapper[4910]: I0105 23:24:32.957272 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"9875106d-cbf9-402b-b7c1-0c3d00445606","Type":"ContainerStarted","Data":"c9ffdda37517b1da6427794deebd7cf45deb559d1704f699a353655e2cf05609"} Jan 05 23:24:32 crc kubenswrapper[4910]: I0105 23:24:32.977347 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=1.977311045 podStartE2EDuration="1.977311045s" podCreationTimestamp="2026-01-05 23:24:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:24:32.972530357 +0000 UTC m=+5604.550028027" watchObservedRunningTime="2026-01-05 23:24:32.977311045 +0000 UTC m=+5604.554808765" Jan 05 23:24:41 crc kubenswrapper[4910]: I0105 23:24:41.426418 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 05 23:24:41 crc kubenswrapper[4910]: I0105 23:24:41.979326 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-7j6xw"] Jan 05 23:24:41 crc kubenswrapper[4910]: I0105 23:24:41.981675 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:41 crc kubenswrapper[4910]: I0105 23:24:41.986413 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-7j6xw"] Jan 05 23:24:41 crc kubenswrapper[4910]: I0105 23:24:41.987310 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 05 23:24:41 crc kubenswrapper[4910]: I0105 23:24:41.988035 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.080332 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-658cj\" (UniqueName: \"kubernetes.io/projected/987fcce4-1c3a-4ffb-b340-65abf751215a-kube-api-access-658cj\") pod \"nova-cell0-cell-mapping-7j6xw\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.080601 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-config-data\") pod \"nova-cell0-cell-mapping-7j6xw\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.080712 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-7j6xw\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.080788 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-scripts\") pod \"nova-cell0-cell-mapping-7j6xw\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.111273 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.112801 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.116384 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.136638 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.175481 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.177174 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.187317 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-7j6xw\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.187394 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-scripts\") pod \"nova-cell0-cell-mapping-7j6xw\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.187485 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-658cj\" (UniqueName: \"kubernetes.io/projected/987fcce4-1c3a-4ffb-b340-65abf751215a-kube-api-access-658cj\") pod \"nova-cell0-cell-mapping-7j6xw\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.187555 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-config-data\") pod \"nova-cell0-cell-mapping-7j6xw\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.192442 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.204461 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-scripts\") pod \"nova-cell0-cell-mapping-7j6xw\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.210357 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-7j6xw\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.223049 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-config-data\") pod \"nova-cell0-cell-mapping-7j6xw\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.230731 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-658cj\" (UniqueName: \"kubernetes.io/projected/987fcce4-1c3a-4ffb-b340-65abf751215a-kube-api-access-658cj\") pod \"nova-cell0-cell-mapping-7j6xw\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.254257 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.287488 4910 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/dnsmasq-dns-5b95997f7-6qm88"] Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.292048 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49gbr\" (UniqueName: \"kubernetes.io/projected/4b5eae5b-ab47-4d68-a2b5-8af634824d09-kube-api-access-49gbr\") pod \"nova-api-0\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.292234 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b5eae5b-ab47-4d68-a2b5-8af634824d09-config-data\") pod \"nova-api-0\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.292335 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npgvg\" (UniqueName: \"kubernetes.io/projected/03c62550-19aa-4c0b-9237-54d78b0ce624-kube-api-access-npgvg\") pod \"nova-metadata-0\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.292460 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03c62550-19aa-4c0b-9237-54d78b0ce624-config-data\") pod \"nova-metadata-0\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.292580 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b5eae5b-ab47-4d68-a2b5-8af634824d09-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.292679 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b5eae5b-ab47-4d68-a2b5-8af634824d09-logs\") pod \"nova-api-0\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.292745 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03c62550-19aa-4c0b-9237-54d78b0ce624-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.292865 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03c62550-19aa-4c0b-9237-54d78b0ce624-logs\") pod \"nova-metadata-0\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.299352 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.305231 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.318381 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b95997f7-6qm88"] Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.337526 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.338877 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.341799 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.347694 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.393964 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npgvg\" (UniqueName: \"kubernetes.io/projected/03c62550-19aa-4c0b-9237-54d78b0ce624-kube-api-access-npgvg\") pod \"nova-metadata-0\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.394088 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03c62550-19aa-4c0b-9237-54d78b0ce624-config-data\") pod \"nova-metadata-0\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.394154 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b5eae5b-ab47-4d68-a2b5-8af634824d09-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.394194 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b5eae5b-ab47-4d68-a2b5-8af634824d09-logs\") pod \"nova-api-0\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.394213 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03c62550-19aa-4c0b-9237-54d78b0ce624-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.394281 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03c62550-19aa-4c0b-9237-54d78b0ce624-logs\") pod \"nova-metadata-0\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.394309 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49gbr\" (UniqueName: \"kubernetes.io/projected/4b5eae5b-ab47-4d68-a2b5-8af634824d09-kube-api-access-49gbr\") pod \"nova-api-0\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.394332 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/4b5eae5b-ab47-4d68-a2b5-8af634824d09-config-data\") pod \"nova-api-0\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.394937 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b5eae5b-ab47-4d68-a2b5-8af634824d09-logs\") pod \"nova-api-0\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.394960 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03c62550-19aa-4c0b-9237-54d78b0ce624-logs\") pod \"nova-metadata-0\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.403858 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b5eae5b-ab47-4d68-a2b5-8af634824d09-config-data\") pod \"nova-api-0\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.406743 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b5eae5b-ab47-4d68-a2b5-8af634824d09-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.414203 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03c62550-19aa-4c0b-9237-54d78b0ce624-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.414866 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03c62550-19aa-4c0b-9237-54d78b0ce624-config-data\") pod \"nova-metadata-0\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.433149 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npgvg\" (UniqueName: \"kubernetes.io/projected/03c62550-19aa-4c0b-9237-54d78b0ce624-kube-api-access-npgvg\") pod \"nova-metadata-0\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.436783 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49gbr\" (UniqueName: \"kubernetes.io/projected/4b5eae5b-ab47-4d68-a2b5-8af634824d09-kube-api-access-49gbr\") pod \"nova-api-0\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.444015 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.453884 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.462831 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.496325 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-dns-svc\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.496369 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ts4fn\" (UniqueName: \"kubernetes.io/projected/a29badab-486f-44b9-a355-de373ae072a4-kube-api-access-ts4fn\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.496470 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tt8hr\" (UniqueName: \"kubernetes.io/projected/754e2277-f84a-46a9-b7d8-789d955fe259-kube-api-access-tt8hr\") pod \"nova-scheduler-0\" (UID: \"754e2277-f84a-46a9-b7d8-789d955fe259\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.496497 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-ovsdbserver-sb\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.496522 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/754e2277-f84a-46a9-b7d8-789d955fe259-config-data\") pod \"nova-scheduler-0\" (UID: \"754e2277-f84a-46a9-b7d8-789d955fe259\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.496538 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-ovsdbserver-nb\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.496594 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/754e2277-f84a-46a9-b7d8-789d955fe259-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"754e2277-f84a-46a9-b7d8-789d955fe259\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.496619 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-config\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.502573 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.584298 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.598625 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/754e2277-f84a-46a9-b7d8-789d955fe259-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"754e2277-f84a-46a9-b7d8-789d955fe259\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.598694 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81e1f339-37da-4c90-9f60-b4d369ea06a9-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"81e1f339-37da-4c90-9f60-b4d369ea06a9\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.598723 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-config\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.598753 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-dns-svc\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.598778 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ts4fn\" (UniqueName: \"kubernetes.io/projected/a29badab-486f-44b9-a355-de373ae072a4-kube-api-access-ts4fn\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.598842 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81e1f339-37da-4c90-9f60-b4d369ea06a9-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"81e1f339-37da-4c90-9f60-b4d369ea06a9\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.598912 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tt8hr\" (UniqueName: \"kubernetes.io/projected/754e2277-f84a-46a9-b7d8-789d955fe259-kube-api-access-tt8hr\") pod \"nova-scheduler-0\" (UID: \"754e2277-f84a-46a9-b7d8-789d955fe259\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.598959 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-ovsdbserver-sb\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.598985 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbp6h\" (UniqueName: 
\"kubernetes.io/projected/81e1f339-37da-4c90-9f60-b4d369ea06a9-kube-api-access-dbp6h\") pod \"nova-cell1-novncproxy-0\" (UID: \"81e1f339-37da-4c90-9f60-b4d369ea06a9\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.599008 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/754e2277-f84a-46a9-b7d8-789d955fe259-config-data\") pod \"nova-scheduler-0\" (UID: \"754e2277-f84a-46a9-b7d8-789d955fe259\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.599026 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-ovsdbserver-nb\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.600236 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-ovsdbserver-nb\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.600832 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-config\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.601438 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-dns-svc\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.602591 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-ovsdbserver-sb\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.609430 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/754e2277-f84a-46a9-b7d8-789d955fe259-config-data\") pod \"nova-scheduler-0\" (UID: \"754e2277-f84a-46a9-b7d8-789d955fe259\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.611298 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/754e2277-f84a-46a9-b7d8-789d955fe259-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"754e2277-f84a-46a9-b7d8-789d955fe259\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.626773 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ts4fn\" (UniqueName: \"kubernetes.io/projected/a29badab-486f-44b9-a355-de373ae072a4-kube-api-access-ts4fn\") pod \"dnsmasq-dns-5b95997f7-6qm88\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 
23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.629814 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tt8hr\" (UniqueName: \"kubernetes.io/projected/754e2277-f84a-46a9-b7d8-789d955fe259-kube-api-access-tt8hr\") pod \"nova-scheduler-0\" (UID: \"754e2277-f84a-46a9-b7d8-789d955fe259\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.637090 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.703592 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81e1f339-37da-4c90-9f60-b4d369ea06a9-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"81e1f339-37da-4c90-9f60-b4d369ea06a9\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.703928 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81e1f339-37da-4c90-9f60-b4d369ea06a9-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"81e1f339-37da-4c90-9f60-b4d369ea06a9\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.703993 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbp6h\" (UniqueName: \"kubernetes.io/projected/81e1f339-37da-4c90-9f60-b4d369ea06a9-kube-api-access-dbp6h\") pod \"nova-cell1-novncproxy-0\" (UID: \"81e1f339-37da-4c90-9f60-b4d369ea06a9\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.707874 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81e1f339-37da-4c90-9f60-b4d369ea06a9-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"81e1f339-37da-4c90-9f60-b4d369ea06a9\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.710239 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81e1f339-37da-4c90-9f60-b4d369ea06a9-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"81e1f339-37da-4c90-9f60-b4d369ea06a9\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.727031 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbp6h\" (UniqueName: \"kubernetes.io/projected/81e1f339-37da-4c90-9f60-b4d369ea06a9-kube-api-access-dbp6h\") pod \"nova-cell1-novncproxy-0\" (UID: \"81e1f339-37da-4c90-9f60-b4d369ea06a9\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.730679 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.774297 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.787368 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:42 crc kubenswrapper[4910]: I0105 23:24:42.882001 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-7j6xw"] Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.044029 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xgtkn"] Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.045505 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.048560 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.054633 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.060784 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xgtkn"] Jan 05 23:24:43 crc kubenswrapper[4910]: W0105 23:24:43.066404 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b5eae5b_ab47_4d68_a2b5_8af634824d09.slice/crio-6edb7cc47ccdf2504ec6bca811fa28c69da2171fb45f0107a609e0a6224f99be WatchSource:0}: Error finding container 6edb7cc47ccdf2504ec6bca811fa28c69da2171fb45f0107a609e0a6224f99be: Status 404 returned error can't find the container with id 6edb7cc47ccdf2504ec6bca811fa28c69da2171fb45f0107a609e0a6224f99be Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.073511 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.108537 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4b5eae5b-ab47-4d68-a2b5-8af634824d09","Type":"ContainerStarted","Data":"6edb7cc47ccdf2504ec6bca811fa28c69da2171fb45f0107a609e0a6224f99be"} Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.110484 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-7j6xw" event={"ID":"987fcce4-1c3a-4ffb-b340-65abf751215a","Type":"ContainerStarted","Data":"2aa6c17139921a32e4cdb0f6f55e90ee7af51529167e92a576aecb257cf02847"} Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.110526 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-7j6xw" event={"ID":"987fcce4-1c3a-4ffb-b340-65abf751215a","Type":"ContainerStarted","Data":"8eba990bd18d71269baaf08baf27ae73860c80a635b5b899d720c31299b59402"} Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.130664 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b95997f7-6qm88"] Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.136043 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-7j6xw" podStartSLOduration=2.136021124 podStartE2EDuration="2.136021124s" podCreationTimestamp="2026-01-05 23:24:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:24:43.124104829 +0000 UTC m=+5614.701602499" watchObservedRunningTime="2026-01-05 23:24:43.136021124 +0000 UTC m=+5614.713518794" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.215843 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-scripts\") pod \"nova-cell1-conductor-db-sync-xgtkn\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.216046 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-xgtkn\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.216191 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-config-data\") pod \"nova-cell1-conductor-db-sync-xgtkn\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.216235 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bszqn\" (UniqueName: \"kubernetes.io/projected/2b03dbb9-5f91-445d-8413-af5468bac3a4-kube-api-access-bszqn\") pod \"nova-cell1-conductor-db-sync-xgtkn\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.250711 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.318247 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-xgtkn\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.318631 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-config-data\") pod \"nova-cell1-conductor-db-sync-xgtkn\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.318675 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bszqn\" (UniqueName: \"kubernetes.io/projected/2b03dbb9-5f91-445d-8413-af5468bac3a4-kube-api-access-bszqn\") pod \"nova-cell1-conductor-db-sync-xgtkn\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.318709 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-scripts\") pod \"nova-cell1-conductor-db-sync-xgtkn\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.325468 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-config-data\") pod \"nova-cell1-conductor-db-sync-xgtkn\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.325657 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-scripts\") pod \"nova-cell1-conductor-db-sync-xgtkn\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.329450 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-xgtkn\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.335149 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.342878 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bszqn\" (UniqueName: \"kubernetes.io/projected/2b03dbb9-5f91-445d-8413-af5468bac3a4-kube-api-access-bszqn\") pod \"nova-cell1-conductor-db-sync-xgtkn\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.345749 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:24:43 crc kubenswrapper[4910]: W0105 23:24:43.352418 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81e1f339_37da_4c90_9f60_b4d369ea06a9.slice/crio-362719a26f7c8fab1591acb150980888d712379620b35751c769c8234c677394 WatchSource:0}: Error finding container 362719a26f7c8fab1591acb150980888d712379620b35751c769c8234c677394: Status 404 returned error can't find the container with id 362719a26f7c8fab1591acb150980888d712379620b35751c769c8234c677394 Jan 05 23:24:43 crc kubenswrapper[4910]: W0105 23:24:43.354267 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod754e2277_f84a_46a9_b7d8_789d955fe259.slice/crio-3a39210f4f37ee6e3cee1a9cc98f91843eff7b4d965a47bb32c47df4bdcc528b WatchSource:0}: Error finding container 3a39210f4f37ee6e3cee1a9cc98f91843eff7b4d965a47bb32c47df4bdcc528b: Status 404 returned error can't find the container with id 3a39210f4f37ee6e3cee1a9cc98f91843eff7b4d965a47bb32c47df4bdcc528b Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.372705 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:43 crc kubenswrapper[4910]: I0105 23:24:43.914887 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xgtkn"] Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.123547 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4b5eae5b-ab47-4d68-a2b5-8af634824d09","Type":"ContainerStarted","Data":"defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96"} Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.123946 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4b5eae5b-ab47-4d68-a2b5-8af634824d09","Type":"ContainerStarted","Data":"fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23"} Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.125061 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"81e1f339-37da-4c90-9f60-b4d369ea06a9","Type":"ContainerStarted","Data":"cfe91149aac13f39a2503a5238dc89d8dfe2c1d8223e4a89c27c0523eab2437a"} Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.125134 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"81e1f339-37da-4c90-9f60-b4d369ea06a9","Type":"ContainerStarted","Data":"362719a26f7c8fab1591acb150980888d712379620b35751c769c8234c677394"} Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.127541 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-xgtkn" event={"ID":"2b03dbb9-5f91-445d-8413-af5468bac3a4","Type":"ContainerStarted","Data":"e9f8eb2fd303dcdb126d1a8ddb65a5e65f8315f105dbb04c373ae3297e2f2c5a"} Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.127576 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-xgtkn" event={"ID":"2b03dbb9-5f91-445d-8413-af5468bac3a4","Type":"ContainerStarted","Data":"eed901000599fc42ad7b4b326e8c09f5530ffc94f2654fcd1c921db29a638ab9"} Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.135663 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"03c62550-19aa-4c0b-9237-54d78b0ce624","Type":"ContainerStarted","Data":"87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a"} Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.135705 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"03c62550-19aa-4c0b-9237-54d78b0ce624","Type":"ContainerStarted","Data":"4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765"} Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.135718 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"03c62550-19aa-4c0b-9237-54d78b0ce624","Type":"ContainerStarted","Data":"d23fe1a0a53f6b1472a551b6e7d6a441fbe7ff7b1972be8e0703d1ae71898fe3"} Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.137282 4910 generic.go:334] "Generic (PLEG): container finished" podID="a29badab-486f-44b9-a355-de373ae072a4" containerID="e53e8ecdd20b5b155e16238fa027bc41386e1802b7caf5ebd2cbffc4c073b806" exitCode=0 Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.137375 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b95997f7-6qm88" 
event={"ID":"a29badab-486f-44b9-a355-de373ae072a4","Type":"ContainerDied","Data":"e53e8ecdd20b5b155e16238fa027bc41386e1802b7caf5ebd2cbffc4c073b806"} Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.137412 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b95997f7-6qm88" event={"ID":"a29badab-486f-44b9-a355-de373ae072a4","Type":"ContainerStarted","Data":"77eba28f3c294f6f5866ad0ac67e040e6c71a0edab3a47fbd36efba32a61d54b"} Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.141312 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"754e2277-f84a-46a9-b7d8-789d955fe259","Type":"ContainerStarted","Data":"2191725ea8ecb9f8c829b71cbb233cb1f98d51ccfd1a7b66d9593a9626461a69"} Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.141366 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"754e2277-f84a-46a9-b7d8-789d955fe259","Type":"ContainerStarted","Data":"3a39210f4f37ee6e3cee1a9cc98f91843eff7b4d965a47bb32c47df4bdcc528b"} Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.151486 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.151463585 podStartE2EDuration="2.151463585s" podCreationTimestamp="2026-01-05 23:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:24:44.14155629 +0000 UTC m=+5615.719053960" watchObservedRunningTime="2026-01-05 23:24:44.151463585 +0000 UTC m=+5615.728961255" Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.167800 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.167773789 podStartE2EDuration="2.167773789s" podCreationTimestamp="2026-01-05 23:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:24:44.163661347 +0000 UTC m=+5615.741159017" watchObservedRunningTime="2026-01-05 23:24:44.167773789 +0000 UTC m=+5615.745271459" Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.228216 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-xgtkn" podStartSLOduration=1.228193095 podStartE2EDuration="1.228193095s" podCreationTimestamp="2026-01-05 23:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:24:44.187935598 +0000 UTC m=+5615.765433258" watchObservedRunningTime="2026-01-05 23:24:44.228193095 +0000 UTC m=+5615.805690765" Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.255060 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.25503935 podStartE2EDuration="2.25503935s" podCreationTimestamp="2026-01-05 23:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:24:44.229227871 +0000 UTC m=+5615.806725591" watchObservedRunningTime="2026-01-05 23:24:44.25503935 +0000 UTC m=+5615.832537020" Jan 05 23:24:44 crc kubenswrapper[4910]: I0105 23:24:44.267093 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.267056438 podStartE2EDuration="2.267056438s" 
podCreationTimestamp="2026-01-05 23:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:24:44.247474993 +0000 UTC m=+5615.824972683" watchObservedRunningTime="2026-01-05 23:24:44.267056438 +0000 UTC m=+5615.844554108" Jan 05 23:24:45 crc kubenswrapper[4910]: I0105 23:24:45.153785 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b95997f7-6qm88" event={"ID":"a29badab-486f-44b9-a355-de373ae072a4","Type":"ContainerStarted","Data":"25934dd8362b09843a7146cfbcbd0441a4e425d546607e1f36315f7bc87f94d2"} Jan 05 23:24:45 crc kubenswrapper[4910]: I0105 23:24:45.154971 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:45 crc kubenswrapper[4910]: I0105 23:24:45.183135 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b95997f7-6qm88" podStartSLOduration=3.183094306 podStartE2EDuration="3.183094306s" podCreationTimestamp="2026-01-05 23:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:24:45.176741729 +0000 UTC m=+5616.754239399" watchObservedRunningTime="2026-01-05 23:24:45.183094306 +0000 UTC m=+5616.760591996" Jan 05 23:24:45 crc kubenswrapper[4910]: I0105 23:24:45.535966 4910 scope.go:117] "RemoveContainer" containerID="0a1f7f1866185bb1c850cbd0623f0a8bad579a67af1c00140e96328d70323774" Jan 05 23:24:45 crc kubenswrapper[4910]: I0105 23:24:45.557535 4910 scope.go:117] "RemoveContainer" containerID="3b2ed3a23f288bc632fd143388dd9ed43057aa911b20659d96f37951f73197e0" Jan 05 23:24:45 crc kubenswrapper[4910]: I0105 23:24:45.623189 4910 scope.go:117] "RemoveContainer" containerID="dfca0a76a4c0728a569ed4f7f0d64a2d878e53597d2a6aa9e3872c25b5573806" Jan 05 23:24:47 crc kubenswrapper[4910]: I0105 23:24:47.184614 4910 generic.go:334] "Generic (PLEG): container finished" podID="2b03dbb9-5f91-445d-8413-af5468bac3a4" containerID="e9f8eb2fd303dcdb126d1a8ddb65a5e65f8315f105dbb04c373ae3297e2f2c5a" exitCode=0 Jan 05 23:24:47 crc kubenswrapper[4910]: I0105 23:24:47.184739 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-xgtkn" event={"ID":"2b03dbb9-5f91-445d-8413-af5468bac3a4","Type":"ContainerDied","Data":"e9f8eb2fd303dcdb126d1a8ddb65a5e65f8315f105dbb04c373ae3297e2f2c5a"} Jan 05 23:24:47 crc kubenswrapper[4910]: I0105 23:24:47.730994 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 05 23:24:47 crc kubenswrapper[4910]: I0105 23:24:47.731071 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 05 23:24:47 crc kubenswrapper[4910]: I0105 23:24:47.775428 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 05 23:24:47 crc kubenswrapper[4910]: I0105 23:24:47.788273 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.232698 4910 generic.go:334] "Generic (PLEG): container finished" podID="987fcce4-1c3a-4ffb-b340-65abf751215a" containerID="2aa6c17139921a32e4cdb0f6f55e90ee7af51529167e92a576aecb257cf02847" exitCode=0 Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.233305 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell0-cell-mapping-7j6xw" event={"ID":"987fcce4-1c3a-4ffb-b340-65abf751215a","Type":"ContainerDied","Data":"2aa6c17139921a32e4cdb0f6f55e90ee7af51529167e92a576aecb257cf02847"} Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.641473 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.756268 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-combined-ca-bundle\") pod \"2b03dbb9-5f91-445d-8413-af5468bac3a4\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.756336 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-config-data\") pod \"2b03dbb9-5f91-445d-8413-af5468bac3a4\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.756444 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-scripts\") pod \"2b03dbb9-5f91-445d-8413-af5468bac3a4\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.756658 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bszqn\" (UniqueName: \"kubernetes.io/projected/2b03dbb9-5f91-445d-8413-af5468bac3a4-kube-api-access-bszqn\") pod \"2b03dbb9-5f91-445d-8413-af5468bac3a4\" (UID: \"2b03dbb9-5f91-445d-8413-af5468bac3a4\") " Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.765078 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-scripts" (OuterVolumeSpecName: "scripts") pod "2b03dbb9-5f91-445d-8413-af5468bac3a4" (UID: "2b03dbb9-5f91-445d-8413-af5468bac3a4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.765584 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b03dbb9-5f91-445d-8413-af5468bac3a4-kube-api-access-bszqn" (OuterVolumeSpecName: "kube-api-access-bszqn") pod "2b03dbb9-5f91-445d-8413-af5468bac3a4" (UID: "2b03dbb9-5f91-445d-8413-af5468bac3a4"). InnerVolumeSpecName "kube-api-access-bszqn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.792758 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2b03dbb9-5f91-445d-8413-af5468bac3a4" (UID: "2b03dbb9-5f91-445d-8413-af5468bac3a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.803117 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-config-data" (OuterVolumeSpecName: "config-data") pod "2b03dbb9-5f91-445d-8413-af5468bac3a4" (UID: "2b03dbb9-5f91-445d-8413-af5468bac3a4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.858595 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.858796 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.858866 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b03dbb9-5f91-445d-8413-af5468bac3a4-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:48 crc kubenswrapper[4910]: I0105 23:24:48.858966 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bszqn\" (UniqueName: \"kubernetes.io/projected/2b03dbb9-5f91-445d-8413-af5468bac3a4-kube-api-access-bszqn\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.247880 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-xgtkn" event={"ID":"2b03dbb9-5f91-445d-8413-af5468bac3a4","Type":"ContainerDied","Data":"eed901000599fc42ad7b4b326e8c09f5530ffc94f2654fcd1c921db29a638ab9"} Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.247969 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eed901000599fc42ad7b4b326e8c09f5530ffc94f2654fcd1c921db29a638ab9" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.247906 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-xgtkn" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.369220 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 05 23:24:49 crc kubenswrapper[4910]: E0105 23:24:49.370276 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b03dbb9-5f91-445d-8413-af5468bac3a4" containerName="nova-cell1-conductor-db-sync" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.370450 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b03dbb9-5f91-445d-8413-af5468bac3a4" containerName="nova-cell1-conductor-db-sync" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.370965 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b03dbb9-5f91-445d-8413-af5468bac3a4" containerName="nova-cell1-conductor-db-sync" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.372398 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.378211 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.382475 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.470396 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxlkt\" (UniqueName: \"kubernetes.io/projected/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-kube-api-access-kxlkt\") pod \"nova-cell1-conductor-0\" (UID: \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.470478 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.470730 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.572453 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.573014 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.573101 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxlkt\" (UniqueName: \"kubernetes.io/projected/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-kube-api-access-kxlkt\") pod \"nova-cell1-conductor-0\" (UID: \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.581227 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.581719 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.592983 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxlkt\" (UniqueName: \"kubernetes.io/projected/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-kube-api-access-kxlkt\") pod \"nova-cell1-conductor-0\" (UID: \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.700328 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.701114 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.879001 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-658cj\" (UniqueName: \"kubernetes.io/projected/987fcce4-1c3a-4ffb-b340-65abf751215a-kube-api-access-658cj\") pod \"987fcce4-1c3a-4ffb-b340-65abf751215a\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.879545 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-scripts\") pod \"987fcce4-1c3a-4ffb-b340-65abf751215a\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.879714 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-combined-ca-bundle\") pod \"987fcce4-1c3a-4ffb-b340-65abf751215a\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.879779 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-config-data\") pod \"987fcce4-1c3a-4ffb-b340-65abf751215a\" (UID: \"987fcce4-1c3a-4ffb-b340-65abf751215a\") " Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.883962 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/987fcce4-1c3a-4ffb-b340-65abf751215a-kube-api-access-658cj" (OuterVolumeSpecName: "kube-api-access-658cj") pod "987fcce4-1c3a-4ffb-b340-65abf751215a" (UID: "987fcce4-1c3a-4ffb-b340-65abf751215a"). InnerVolumeSpecName "kube-api-access-658cj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.884807 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-scripts" (OuterVolumeSpecName: "scripts") pod "987fcce4-1c3a-4ffb-b340-65abf751215a" (UID: "987fcce4-1c3a-4ffb-b340-65abf751215a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.910379 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-config-data" (OuterVolumeSpecName: "config-data") pod "987fcce4-1c3a-4ffb-b340-65abf751215a" (UID: "987fcce4-1c3a-4ffb-b340-65abf751215a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.931475 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "987fcce4-1c3a-4ffb-b340-65abf751215a" (UID: "987fcce4-1c3a-4ffb-b340-65abf751215a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.981805 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.981836 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-658cj\" (UniqueName: \"kubernetes.io/projected/987fcce4-1c3a-4ffb-b340-65abf751215a-kube-api-access-658cj\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.981845 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:49 crc kubenswrapper[4910]: I0105 23:24:49.981855 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/987fcce4-1c3a-4ffb-b340-65abf751215a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:50 crc kubenswrapper[4910]: I0105 23:24:50.195698 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 05 23:24:50 crc kubenswrapper[4910]: W0105 23:24:50.201178 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55cf337c_c5d7_48cb_a18f_8c926b8c77e1.slice/crio-d0df26d023660806b38b53486b449157eaf4c079ed7440f804e6b4f67dfb34b2 WatchSource:0}: Error finding container d0df26d023660806b38b53486b449157eaf4c079ed7440f804e6b4f67dfb34b2: Status 404 returned error can't find the container with id d0df26d023660806b38b53486b449157eaf4c079ed7440f804e6b4f67dfb34b2 Jan 05 23:24:50 crc kubenswrapper[4910]: I0105 23:24:50.268622 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-7j6xw" Jan 05 23:24:50 crc kubenswrapper[4910]: I0105 23:24:50.268880 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-7j6xw" event={"ID":"987fcce4-1c3a-4ffb-b340-65abf751215a","Type":"ContainerDied","Data":"8eba990bd18d71269baaf08baf27ae73860c80a635b5b899d720c31299b59402"} Jan 05 23:24:50 crc kubenswrapper[4910]: I0105 23:24:50.268948 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8eba990bd18d71269baaf08baf27ae73860c80a635b5b899d720c31299b59402" Jan 05 23:24:50 crc kubenswrapper[4910]: I0105 23:24:50.272854 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"55cf337c-c5d7-48cb-a18f-8c926b8c77e1","Type":"ContainerStarted","Data":"d0df26d023660806b38b53486b449157eaf4c079ed7440f804e6b4f67dfb34b2"} Jan 05 23:24:50 crc kubenswrapper[4910]: I0105 23:24:50.549078 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:24:50 crc kubenswrapper[4910]: I0105 23:24:50.549941 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4b5eae5b-ab47-4d68-a2b5-8af634824d09" containerName="nova-api-log" containerID="cri-o://fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23" gracePeriod=30 Jan 05 23:24:50 crc kubenswrapper[4910]: I0105 23:24:50.550158 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4b5eae5b-ab47-4d68-a2b5-8af634824d09" containerName="nova-api-api" containerID="cri-o://defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96" gracePeriod=30 Jan 05 23:24:50 crc kubenswrapper[4910]: I0105 23:24:50.571339 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:24:50 crc kubenswrapper[4910]: I0105 23:24:50.572068 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="754e2277-f84a-46a9-b7d8-789d955fe259" containerName="nova-scheduler-scheduler" containerID="cri-o://2191725ea8ecb9f8c829b71cbb233cb1f98d51ccfd1a7b66d9593a9626461a69" gracePeriod=30 Jan 05 23:24:50 crc kubenswrapper[4910]: I0105 23:24:50.591170 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:24:50 crc kubenswrapper[4910]: I0105 23:24:50.591471 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="03c62550-19aa-4c0b-9237-54d78b0ce624" containerName="nova-metadata-log" containerID="cri-o://4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765" gracePeriod=30 Jan 05 23:24:50 crc kubenswrapper[4910]: I0105 23:24:50.592454 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="03c62550-19aa-4c0b-9237-54d78b0ce624" containerName="nova-metadata-metadata" containerID="cri-o://87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a" gracePeriod=30 Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.247499 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.287206 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.294224 4910 generic.go:334] "Generic (PLEG): container finished" podID="03c62550-19aa-4c0b-9237-54d78b0ce624" containerID="87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a" exitCode=0 Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.294264 4910 generic.go:334] "Generic (PLEG): container finished" podID="03c62550-19aa-4c0b-9237-54d78b0ce624" containerID="4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765" exitCode=143 Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.294363 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"03c62550-19aa-4c0b-9237-54d78b0ce624","Type":"ContainerDied","Data":"87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a"} Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.294395 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"03c62550-19aa-4c0b-9237-54d78b0ce624","Type":"ContainerDied","Data":"4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765"} Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.294409 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"03c62550-19aa-4c0b-9237-54d78b0ce624","Type":"ContainerDied","Data":"d23fe1a0a53f6b1472a551b6e7d6a441fbe7ff7b1972be8e0703d1ae71898fe3"} Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.294437 4910 scope.go:117] "RemoveContainer" containerID="87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.294662 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.297778 4910 generic.go:334] "Generic (PLEG): container finished" podID="4b5eae5b-ab47-4d68-a2b5-8af634824d09" containerID="defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96" exitCode=0 Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.297815 4910 generic.go:334] "Generic (PLEG): container finished" podID="4b5eae5b-ab47-4d68-a2b5-8af634824d09" containerID="fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23" exitCode=143 Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.297872 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4b5eae5b-ab47-4d68-a2b5-8af634824d09","Type":"ContainerDied","Data":"defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96"} Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.297945 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4b5eae5b-ab47-4d68-a2b5-8af634824d09","Type":"ContainerDied","Data":"fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23"} Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.297964 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4b5eae5b-ab47-4d68-a2b5-8af634824d09","Type":"ContainerDied","Data":"6edb7cc47ccdf2504ec6bca811fa28c69da2171fb45f0107a609e0a6224f99be"} Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.298473 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.299959 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"55cf337c-c5d7-48cb-a18f-8c926b8c77e1","Type":"ContainerStarted","Data":"9a0e9ef7374f1bd2d5f92b2f62ba2f4b630ee553ebbe4f5c8b1de8c0a545a91c"} Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.300208 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.333581 4910 scope.go:117] "RemoveContainer" containerID="4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.334973 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.334953255 podStartE2EDuration="2.334953255s" podCreationTimestamp="2026-01-05 23:24:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:24:51.322893566 +0000 UTC m=+5622.900391256" watchObservedRunningTime="2026-01-05 23:24:51.334953255 +0000 UTC m=+5622.912450925" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.351513 4910 scope.go:117] "RemoveContainer" containerID="87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a" Jan 05 23:24:51 crc kubenswrapper[4910]: E0105 23:24:51.352758 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a\": container with ID starting with 87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a not found: ID does not exist" containerID="87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.352812 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a"} err="failed to get container status \"87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a\": rpc error: code = NotFound desc = could not find container \"87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a\": container with ID starting with 87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a not found: ID does not exist" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.352843 4910 scope.go:117] "RemoveContainer" containerID="4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765" Jan 05 23:24:51 crc kubenswrapper[4910]: E0105 23:24:51.353160 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765\": container with ID starting with 4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765 not found: ID does not exist" containerID="4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.353192 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765"} err="failed to get container status \"4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765\": rpc error: code = NotFound desc = could not find container 
\"4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765\": container with ID starting with 4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765 not found: ID does not exist" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.353213 4910 scope.go:117] "RemoveContainer" containerID="87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.354779 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a"} err="failed to get container status \"87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a\": rpc error: code = NotFound desc = could not find container \"87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a\": container with ID starting with 87a4e265bdf1e43ccfdb89a573e06baf117518471d08080c0d61629956a3025a not found: ID does not exist" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.354821 4910 scope.go:117] "RemoveContainer" containerID="4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.355145 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765"} err="failed to get container status \"4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765\": rpc error: code = NotFound desc = could not find container \"4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765\": container with ID starting with 4c955a06405586ff4035e2019f67b7ed4f9c4ccf329b97e1f08f95c6a2d43765 not found: ID does not exist" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.355162 4910 scope.go:117] "RemoveContainer" containerID="defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.377163 4910 scope.go:117] "RemoveContainer" containerID="fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.399168 4910 scope.go:117] "RemoveContainer" containerID="defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96" Jan 05 23:24:51 crc kubenswrapper[4910]: E0105 23:24:51.406928 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96\": container with ID starting with defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96 not found: ID does not exist" containerID="defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.406981 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96"} err="failed to get container status \"defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96\": rpc error: code = NotFound desc = could not find container \"defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96\": container with ID starting with defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96 not found: ID does not exist" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.407017 4910 scope.go:117] "RemoveContainer" containerID="fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23" Jan 05 23:24:51 crc 
kubenswrapper[4910]: E0105 23:24:51.407887 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23\": container with ID starting with fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23 not found: ID does not exist" containerID="fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.407921 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23"} err="failed to get container status \"fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23\": rpc error: code = NotFound desc = could not find container \"fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23\": container with ID starting with fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23 not found: ID does not exist" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.407941 4910 scope.go:117] "RemoveContainer" containerID="defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.408189 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96"} err="failed to get container status \"defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96\": rpc error: code = NotFound desc = could not find container \"defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96\": container with ID starting with defca9354aa1ef41728103bbbacd99bc96cbca996621573289129aa0b1acba96 not found: ID does not exist" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.408215 4910 scope.go:117] "RemoveContainer" containerID="fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.408803 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23"} err="failed to get container status \"fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23\": rpc error: code = NotFound desc = could not find container \"fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23\": container with ID starting with fd719df986d65ad0a93489af955c3a3c497291d405444665c880baabcac15a23 not found: ID does not exist" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.420504 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03c62550-19aa-4c0b-9237-54d78b0ce624-combined-ca-bundle\") pod \"03c62550-19aa-4c0b-9237-54d78b0ce624\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.420576 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03c62550-19aa-4c0b-9237-54d78b0ce624-logs\") pod \"03c62550-19aa-4c0b-9237-54d78b0ce624\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.420682 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-npgvg\" (UniqueName: \"kubernetes.io/projected/03c62550-19aa-4c0b-9237-54d78b0ce624-kube-api-access-npgvg\") pod 
\"03c62550-19aa-4c0b-9237-54d78b0ce624\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.420707 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b5eae5b-ab47-4d68-a2b5-8af634824d09-config-data\") pod \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.420746 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b5eae5b-ab47-4d68-a2b5-8af634824d09-combined-ca-bundle\") pod \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.420794 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b5eae5b-ab47-4d68-a2b5-8af634824d09-logs\") pod \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.420817 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49gbr\" (UniqueName: \"kubernetes.io/projected/4b5eae5b-ab47-4d68-a2b5-8af634824d09-kube-api-access-49gbr\") pod \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\" (UID: \"4b5eae5b-ab47-4d68-a2b5-8af634824d09\") " Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.420884 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03c62550-19aa-4c0b-9237-54d78b0ce624-config-data\") pod \"03c62550-19aa-4c0b-9237-54d78b0ce624\" (UID: \"03c62550-19aa-4c0b-9237-54d78b0ce624\") " Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.421246 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03c62550-19aa-4c0b-9237-54d78b0ce624-logs" (OuterVolumeSpecName: "logs") pod "03c62550-19aa-4c0b-9237-54d78b0ce624" (UID: "03c62550-19aa-4c0b-9237-54d78b0ce624"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.421538 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03c62550-19aa-4c0b-9237-54d78b0ce624-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.421722 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b5eae5b-ab47-4d68-a2b5-8af634824d09-logs" (OuterVolumeSpecName: "logs") pod "4b5eae5b-ab47-4d68-a2b5-8af634824d09" (UID: "4b5eae5b-ab47-4d68-a2b5-8af634824d09"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.428585 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b5eae5b-ab47-4d68-a2b5-8af634824d09-kube-api-access-49gbr" (OuterVolumeSpecName: "kube-api-access-49gbr") pod "4b5eae5b-ab47-4d68-a2b5-8af634824d09" (UID: "4b5eae5b-ab47-4d68-a2b5-8af634824d09"). InnerVolumeSpecName "kube-api-access-49gbr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.429536 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03c62550-19aa-4c0b-9237-54d78b0ce624-kube-api-access-npgvg" (OuterVolumeSpecName: "kube-api-access-npgvg") pod "03c62550-19aa-4c0b-9237-54d78b0ce624" (UID: "03c62550-19aa-4c0b-9237-54d78b0ce624"). InnerVolumeSpecName "kube-api-access-npgvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.450679 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03c62550-19aa-4c0b-9237-54d78b0ce624-config-data" (OuterVolumeSpecName: "config-data") pod "03c62550-19aa-4c0b-9237-54d78b0ce624" (UID: "03c62550-19aa-4c0b-9237-54d78b0ce624"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.450867 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b5eae5b-ab47-4d68-a2b5-8af634824d09-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b5eae5b-ab47-4d68-a2b5-8af634824d09" (UID: "4b5eae5b-ab47-4d68-a2b5-8af634824d09"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.453902 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03c62550-19aa-4c0b-9237-54d78b0ce624-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "03c62550-19aa-4c0b-9237-54d78b0ce624" (UID: "03c62550-19aa-4c0b-9237-54d78b0ce624"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.454560 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b5eae5b-ab47-4d68-a2b5-8af634824d09-config-data" (OuterVolumeSpecName: "config-data") pod "4b5eae5b-ab47-4d68-a2b5-8af634824d09" (UID: "4b5eae5b-ab47-4d68-a2b5-8af634824d09"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.524393 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b5eae5b-ab47-4d68-a2b5-8af634824d09-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.524475 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49gbr\" (UniqueName: \"kubernetes.io/projected/4b5eae5b-ab47-4d68-a2b5-8af634824d09-kube-api-access-49gbr\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.524496 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03c62550-19aa-4c0b-9237-54d78b0ce624-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.524511 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03c62550-19aa-4c0b-9237-54d78b0ce624-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.524524 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-npgvg\" (UniqueName: \"kubernetes.io/projected/03c62550-19aa-4c0b-9237-54d78b0ce624-kube-api-access-npgvg\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.524564 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b5eae5b-ab47-4d68-a2b5-8af634824d09-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.524578 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b5eae5b-ab47-4d68-a2b5-8af634824d09-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.687989 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.702301 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.709538 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.717012 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.727547 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:24:51 crc kubenswrapper[4910]: E0105 23:24:51.728054 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03c62550-19aa-4c0b-9237-54d78b0ce624" containerName="nova-metadata-log" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.728079 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="03c62550-19aa-4c0b-9237-54d78b0ce624" containerName="nova-metadata-log" Jan 05 23:24:51 crc kubenswrapper[4910]: E0105 23:24:51.728109 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="987fcce4-1c3a-4ffb-b340-65abf751215a" containerName="nova-manage" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.728136 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="987fcce4-1c3a-4ffb-b340-65abf751215a" containerName="nova-manage" Jan 05 23:24:51 crc kubenswrapper[4910]: E0105 23:24:51.728149 4910 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="4b5eae5b-ab47-4d68-a2b5-8af634824d09" containerName="nova-api-api" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.728157 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b5eae5b-ab47-4d68-a2b5-8af634824d09" containerName="nova-api-api" Jan 05 23:24:51 crc kubenswrapper[4910]: E0105 23:24:51.728172 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03c62550-19aa-4c0b-9237-54d78b0ce624" containerName="nova-metadata-metadata" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.728181 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="03c62550-19aa-4c0b-9237-54d78b0ce624" containerName="nova-metadata-metadata" Jan 05 23:24:51 crc kubenswrapper[4910]: E0105 23:24:51.728208 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b5eae5b-ab47-4d68-a2b5-8af634824d09" containerName="nova-api-log" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.728217 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b5eae5b-ab47-4d68-a2b5-8af634824d09" containerName="nova-api-log" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.728442 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b5eae5b-ab47-4d68-a2b5-8af634824d09" containerName="nova-api-log" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.728468 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b5eae5b-ab47-4d68-a2b5-8af634824d09" containerName="nova-api-api" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.728487 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="03c62550-19aa-4c0b-9237-54d78b0ce624" containerName="nova-metadata-metadata" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.728502 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="987fcce4-1c3a-4ffb-b340-65abf751215a" containerName="nova-manage" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.728516 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="03c62550-19aa-4c0b-9237-54d78b0ce624" containerName="nova-metadata-log" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.729896 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.734011 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.734631 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.737594 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.741684 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.748474 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.755867 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.830562 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09fc0e9f-42fa-4177-aa14-680c36d4b679-config-data\") pod \"nova-api-0\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.830620 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09fc0e9f-42fa-4177-aa14-680c36d4b679-logs\") pod \"nova-api-0\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.830685 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ffce34a-e594-439d-a15e-b3740ebdbab8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.830732 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ffce34a-e594-439d-a15e-b3740ebdbab8-config-data\") pod \"nova-metadata-0\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.830776 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zx94\" (UniqueName: \"kubernetes.io/projected/0ffce34a-e594-439d-a15e-b3740ebdbab8-kube-api-access-9zx94\") pod \"nova-metadata-0\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.830794 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ffce34a-e594-439d-a15e-b3740ebdbab8-logs\") pod \"nova-metadata-0\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.830811 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkm8t\" (UniqueName: \"kubernetes.io/projected/09fc0e9f-42fa-4177-aa14-680c36d4b679-kube-api-access-kkm8t\") pod \"nova-api-0\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.830844 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09fc0e9f-42fa-4177-aa14-680c36d4b679-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: 
I0105 23:24:51.933300 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09fc0e9f-42fa-4177-aa14-680c36d4b679-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.933461 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09fc0e9f-42fa-4177-aa14-680c36d4b679-config-data\") pod \"nova-api-0\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.933529 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09fc0e9f-42fa-4177-aa14-680c36d4b679-logs\") pod \"nova-api-0\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.933606 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ffce34a-e594-439d-a15e-b3740ebdbab8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.933691 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ffce34a-e594-439d-a15e-b3740ebdbab8-config-data\") pod \"nova-metadata-0\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.933800 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zx94\" (UniqueName: \"kubernetes.io/projected/0ffce34a-e594-439d-a15e-b3740ebdbab8-kube-api-access-9zx94\") pod \"nova-metadata-0\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.933864 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ffce34a-e594-439d-a15e-b3740ebdbab8-logs\") pod \"nova-metadata-0\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.933917 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkm8t\" (UniqueName: \"kubernetes.io/projected/09fc0e9f-42fa-4177-aa14-680c36d4b679-kube-api-access-kkm8t\") pod \"nova-api-0\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.934710 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09fc0e9f-42fa-4177-aa14-680c36d4b679-logs\") pod \"nova-api-0\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.935648 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ffce34a-e594-439d-a15e-b3740ebdbab8-logs\") pod \"nova-metadata-0\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.941798 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ffce34a-e594-439d-a15e-b3740ebdbab8-config-data\") pod \"nova-metadata-0\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.955218 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkm8t\" (UniqueName: \"kubernetes.io/projected/09fc0e9f-42fa-4177-aa14-680c36d4b679-kube-api-access-kkm8t\") pod \"nova-api-0\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.955976 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09fc0e9f-42fa-4177-aa14-680c36d4b679-config-data\") pod \"nova-api-0\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.956365 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ffce34a-e594-439d-a15e-b3740ebdbab8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " pod="openstack/nova-metadata-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.963970 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09fc0e9f-42fa-4177-aa14-680c36d4b679-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " pod="openstack/nova-api-0" Jan 05 23:24:51 crc kubenswrapper[4910]: I0105 23:24:51.964099 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zx94\" (UniqueName: \"kubernetes.io/projected/0ffce34a-e594-439d-a15e-b3740ebdbab8-kube-api-access-9zx94\") pod \"nova-metadata-0\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " pod="openstack/nova-metadata-0" Jan 05 23:24:52 crc kubenswrapper[4910]: I0105 23:24:52.067843 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:24:52 crc kubenswrapper[4910]: I0105 23:24:52.068003 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:24:52 crc kubenswrapper[4910]: I0105 23:24:52.591902 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:24:52 crc kubenswrapper[4910]: W0105 23:24:52.592817 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09fc0e9f_42fa_4177_aa14_680c36d4b679.slice/crio-9841bf57b6f7d7cd2ded2ebbf8ce564cb7b8f209dd273045031aca7476bdbe9c WatchSource:0}: Error finding container 9841bf57b6f7d7cd2ded2ebbf8ce564cb7b8f209dd273045031aca7476bdbe9c: Status 404 returned error can't find the container with id 9841bf57b6f7d7cd2ded2ebbf8ce564cb7b8f209dd273045031aca7476bdbe9c Jan 05 23:24:52 crc kubenswrapper[4910]: I0105 23:24:52.643236 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:24:52 crc kubenswrapper[4910]: I0105 23:24:52.665877 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:24:52 crc kubenswrapper[4910]: W0105 23:24:52.688990 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ffce34a_e594_439d_a15e_b3740ebdbab8.slice/crio-77bc7e0d4c107c2b1ff85ae9aa6379f69b9fedfcaa40060dc37f8d59ca3ec43d WatchSource:0}: Error finding container 77bc7e0d4c107c2b1ff85ae9aa6379f69b9fedfcaa40060dc37f8d59ca3ec43d: Status 404 returned error can't find the container with id 77bc7e0d4c107c2b1ff85ae9aa6379f69b9fedfcaa40060dc37f8d59ca3ec43d Jan 05 23:24:52 crc kubenswrapper[4910]: I0105 23:24:52.783590 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03c62550-19aa-4c0b-9237-54d78b0ce624" path="/var/lib/kubelet/pods/03c62550-19aa-4c0b-9237-54d78b0ce624/volumes" Jan 05 23:24:52 crc kubenswrapper[4910]: I0105 23:24:52.784274 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b5eae5b-ab47-4d68-a2b5-8af634824d09" path="/var/lib/kubelet/pods/4b5eae5b-ab47-4d68-a2b5-8af634824d09/volumes" Jan 05 23:24:52 crc kubenswrapper[4910]: I0105 23:24:52.784885 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6766f689d9-xljtt"] Jan 05 23:24:52 crc kubenswrapper[4910]: I0105 23:24:52.785252 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6766f689d9-xljtt" podUID="dac76acf-d1d0-40dd-9bba-53f9f52eb844" containerName="dnsmasq-dns" containerID="cri-o://dda3c8d467b38cffda17383a9c2affbda4b9b11be91f4b73324ddfa487968cd7" gracePeriod=10 Jan 05 23:24:52 crc kubenswrapper[4910]: I0105 23:24:52.788363 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:52 crc kubenswrapper[4910]: I0105 23:24:52.816169 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.234566 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.324683 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0ffce34a-e594-439d-a15e-b3740ebdbab8","Type":"ContainerStarted","Data":"056759bb1a9d7538f78f95f0831254528d6a3437e98e4b981a204b193f028d59"} Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.324735 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0ffce34a-e594-439d-a15e-b3740ebdbab8","Type":"ContainerStarted","Data":"e1d87310adfcc58ff0dbcce33fb401ead26831bbb19cb431ba43ec72aacee30b"} Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.324744 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0ffce34a-e594-439d-a15e-b3740ebdbab8","Type":"ContainerStarted","Data":"77bc7e0d4c107c2b1ff85ae9aa6379f69b9fedfcaa40060dc37f8d59ca3ec43d"} Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.328430 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"09fc0e9f-42fa-4177-aa14-680c36d4b679","Type":"ContainerStarted","Data":"d13b6fbaadca358a5dcbbb25ade3d32d3e64cb0b633bbc40f067a4a1f063d2e3"} Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.328457 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"09fc0e9f-42fa-4177-aa14-680c36d4b679","Type":"ContainerStarted","Data":"da3f0d7a8ed1eea2cb193652991a51d98bc426bd93c7c77347f5a114ec10d95a"} Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.328468 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"09fc0e9f-42fa-4177-aa14-680c36d4b679","Type":"ContainerStarted","Data":"9841bf57b6f7d7cd2ded2ebbf8ce564cb7b8f209dd273045031aca7476bdbe9c"} Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.330148 4910 generic.go:334] "Generic (PLEG): container finished" podID="dac76acf-d1d0-40dd-9bba-53f9f52eb844" containerID="dda3c8d467b38cffda17383a9c2affbda4b9b11be91f4b73324ddfa487968cd7" exitCode=0 Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.330279 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6766f689d9-xljtt" event={"ID":"dac76acf-d1d0-40dd-9bba-53f9f52eb844","Type":"ContainerDied","Data":"dda3c8d467b38cffda17383a9c2affbda4b9b11be91f4b73324ddfa487968cd7"} Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.330301 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6766f689d9-xljtt" event={"ID":"dac76acf-d1d0-40dd-9bba-53f9f52eb844","Type":"ContainerDied","Data":"24d2f353d1bd894faa6ff15ec98b05e00aa8b938b9ac784113013fd83221c8af"} Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.330322 4910 scope.go:117] "RemoveContainer" containerID="dda3c8d467b38cffda17383a9c2affbda4b9b11be91f4b73324ddfa487968cd7" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.331011 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6766f689d9-xljtt" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.347103 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.351432 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.3514107969999998 podStartE2EDuration="2.351410797s" podCreationTimestamp="2026-01-05 23:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:24:53.345249305 +0000 UTC m=+5624.922746985" watchObservedRunningTime="2026-01-05 23:24:53.351410797 +0000 UTC m=+5624.928908477" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.354149 4910 scope.go:117] "RemoveContainer" containerID="346b9b475d5cf4120380530cad52e8cd8d2af07fef23afed40e7089da2fe9a94" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.374779 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.374756896 podStartE2EDuration="2.374756896s" podCreationTimestamp="2026-01-05 23:24:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:24:53.373466624 +0000 UTC m=+5624.950964294" watchObservedRunningTime="2026-01-05 23:24:53.374756896 +0000 UTC m=+5624.952254566" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.394229 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-dns-svc\") pod \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.394413 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5lwl7\" (UniqueName: \"kubernetes.io/projected/dac76acf-d1d0-40dd-9bba-53f9f52eb844-kube-api-access-5lwl7\") pod \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.394533 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-ovsdbserver-nb\") pod \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.394674 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-config\") pod \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.394746 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-ovsdbserver-sb\") pod \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\" (UID: \"dac76acf-d1d0-40dd-9bba-53f9f52eb844\") " Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.403327 4910 scope.go:117] "RemoveContainer" containerID="dda3c8d467b38cffda17383a9c2affbda4b9b11be91f4b73324ddfa487968cd7" Jan 05 23:24:53 crc kubenswrapper[4910]: E0105 23:24:53.404433 4910 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dda3c8d467b38cffda17383a9c2affbda4b9b11be91f4b73324ddfa487968cd7\": container with ID starting with dda3c8d467b38cffda17383a9c2affbda4b9b11be91f4b73324ddfa487968cd7 not found: ID does not exist" containerID="dda3c8d467b38cffda17383a9c2affbda4b9b11be91f4b73324ddfa487968cd7" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.404542 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dda3c8d467b38cffda17383a9c2affbda4b9b11be91f4b73324ddfa487968cd7"} err="failed to get container status \"dda3c8d467b38cffda17383a9c2affbda4b9b11be91f4b73324ddfa487968cd7\": rpc error: code = NotFound desc = could not find container \"dda3c8d467b38cffda17383a9c2affbda4b9b11be91f4b73324ddfa487968cd7\": container with ID starting with dda3c8d467b38cffda17383a9c2affbda4b9b11be91f4b73324ddfa487968cd7 not found: ID does not exist" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.405833 4910 scope.go:117] "RemoveContainer" containerID="346b9b475d5cf4120380530cad52e8cd8d2af07fef23afed40e7089da2fe9a94" Jan 05 23:24:53 crc kubenswrapper[4910]: E0105 23:24:53.409283 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"346b9b475d5cf4120380530cad52e8cd8d2af07fef23afed40e7089da2fe9a94\": container with ID starting with 346b9b475d5cf4120380530cad52e8cd8d2af07fef23afed40e7089da2fe9a94 not found: ID does not exist" containerID="346b9b475d5cf4120380530cad52e8cd8d2af07fef23afed40e7089da2fe9a94" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.409337 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"346b9b475d5cf4120380530cad52e8cd8d2af07fef23afed40e7089da2fe9a94"} err="failed to get container status \"346b9b475d5cf4120380530cad52e8cd8d2af07fef23afed40e7089da2fe9a94\": rpc error: code = NotFound desc = could not find container \"346b9b475d5cf4120380530cad52e8cd8d2af07fef23afed40e7089da2fe9a94\": container with ID starting with 346b9b475d5cf4120380530cad52e8cd8d2af07fef23afed40e7089da2fe9a94 not found: ID does not exist" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.418419 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dac76acf-d1d0-40dd-9bba-53f9f52eb844-kube-api-access-5lwl7" (OuterVolumeSpecName: "kube-api-access-5lwl7") pod "dac76acf-d1d0-40dd-9bba-53f9f52eb844" (UID: "dac76acf-d1d0-40dd-9bba-53f9f52eb844"). InnerVolumeSpecName "kube-api-access-5lwl7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.470289 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dac76acf-d1d0-40dd-9bba-53f9f52eb844" (UID: "dac76acf-d1d0-40dd-9bba-53f9f52eb844"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.470304 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dac76acf-d1d0-40dd-9bba-53f9f52eb844" (UID: "dac76acf-d1d0-40dd-9bba-53f9f52eb844"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.472523 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dac76acf-d1d0-40dd-9bba-53f9f52eb844" (UID: "dac76acf-d1d0-40dd-9bba-53f9f52eb844"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.499958 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.499993 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.500008 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5lwl7\" (UniqueName: \"kubernetes.io/projected/dac76acf-d1d0-40dd-9bba-53f9f52eb844-kube-api-access-5lwl7\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.500020 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.502095 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-config" (OuterVolumeSpecName: "config") pod "dac76acf-d1d0-40dd-9bba-53f9f52eb844" (UID: "dac76acf-d1d0-40dd-9bba-53f9f52eb844"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.601651 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dac76acf-d1d0-40dd-9bba-53f9f52eb844-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.675049 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6766f689d9-xljtt"] Jan 05 23:24:53 crc kubenswrapper[4910]: I0105 23:24:53.686379 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6766f689d9-xljtt"] Jan 05 23:24:54 crc kubenswrapper[4910]: I0105 23:24:54.742454 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dac76acf-d1d0-40dd-9bba-53f9f52eb844" path="/var/lib/kubelet/pods/dac76acf-d1d0-40dd-9bba-53f9f52eb844/volumes" Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.355843 4910 generic.go:334] "Generic (PLEG): container finished" podID="754e2277-f84a-46a9-b7d8-789d955fe259" containerID="2191725ea8ecb9f8c829b71cbb233cb1f98d51ccfd1a7b66d9593a9626461a69" exitCode=0 Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.355968 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"754e2277-f84a-46a9-b7d8-789d955fe259","Type":"ContainerDied","Data":"2191725ea8ecb9f8c829b71cbb233cb1f98d51ccfd1a7b66d9593a9626461a69"} Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.356275 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"754e2277-f84a-46a9-b7d8-789d955fe259","Type":"ContainerDied","Data":"3a39210f4f37ee6e3cee1a9cc98f91843eff7b4d965a47bb32c47df4bdcc528b"} Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.356297 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3a39210f4f37ee6e3cee1a9cc98f91843eff7b4d965a47bb32c47df4bdcc528b" Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.438784 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.551496 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/754e2277-f84a-46a9-b7d8-789d955fe259-config-data\") pod \"754e2277-f84a-46a9-b7d8-789d955fe259\" (UID: \"754e2277-f84a-46a9-b7d8-789d955fe259\") " Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.551796 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/754e2277-f84a-46a9-b7d8-789d955fe259-combined-ca-bundle\") pod \"754e2277-f84a-46a9-b7d8-789d955fe259\" (UID: \"754e2277-f84a-46a9-b7d8-789d955fe259\") " Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.551854 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tt8hr\" (UniqueName: \"kubernetes.io/projected/754e2277-f84a-46a9-b7d8-789d955fe259-kube-api-access-tt8hr\") pod \"754e2277-f84a-46a9-b7d8-789d955fe259\" (UID: \"754e2277-f84a-46a9-b7d8-789d955fe259\") " Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.558619 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/754e2277-f84a-46a9-b7d8-789d955fe259-kube-api-access-tt8hr" (OuterVolumeSpecName: "kube-api-access-tt8hr") pod "754e2277-f84a-46a9-b7d8-789d955fe259" (UID: "754e2277-f84a-46a9-b7d8-789d955fe259"). InnerVolumeSpecName "kube-api-access-tt8hr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.589481 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/754e2277-f84a-46a9-b7d8-789d955fe259-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "754e2277-f84a-46a9-b7d8-789d955fe259" (UID: "754e2277-f84a-46a9-b7d8-789d955fe259"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.590472 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/754e2277-f84a-46a9-b7d8-789d955fe259-config-data" (OuterVolumeSpecName: "config-data") pod "754e2277-f84a-46a9-b7d8-789d955fe259" (UID: "754e2277-f84a-46a9-b7d8-789d955fe259"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.653895 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/754e2277-f84a-46a9-b7d8-789d955fe259-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.654104 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tt8hr\" (UniqueName: \"kubernetes.io/projected/754e2277-f84a-46a9-b7d8-789d955fe259-kube-api-access-tt8hr\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:55 crc kubenswrapper[4910]: I0105 23:24:55.654235 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/754e2277-f84a-46a9-b7d8-789d955fe259-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.367251 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.434084 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.456830 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.471939 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:24:56 crc kubenswrapper[4910]: E0105 23:24:56.472463 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="754e2277-f84a-46a9-b7d8-789d955fe259" containerName="nova-scheduler-scheduler" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.472481 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="754e2277-f84a-46a9-b7d8-789d955fe259" containerName="nova-scheduler-scheduler" Jan 05 23:24:56 crc kubenswrapper[4910]: E0105 23:24:56.472527 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dac76acf-d1d0-40dd-9bba-53f9f52eb844" containerName="init" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.472538 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dac76acf-d1d0-40dd-9bba-53f9f52eb844" containerName="init" Jan 05 23:24:56 crc kubenswrapper[4910]: E0105 23:24:56.472557 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dac76acf-d1d0-40dd-9bba-53f9f52eb844" containerName="dnsmasq-dns" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.472565 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dac76acf-d1d0-40dd-9bba-53f9f52eb844" containerName="dnsmasq-dns" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.472800 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="dac76acf-d1d0-40dd-9bba-53f9f52eb844" containerName="dnsmasq-dns" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.472836 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="754e2277-f84a-46a9-b7d8-789d955fe259" containerName="nova-scheduler-scheduler" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.473828 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.483537 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.484214 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.569708 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34e77073-5b5d-432d-b7c3-e1e9e35eac78-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.569812 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29z5n\" (UniqueName: \"kubernetes.io/projected/34e77073-5b5d-432d-b7c3-e1e9e35eac78-kube-api-access-29z5n\") pod \"nova-scheduler-0\" (UID: \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.569939 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34e77073-5b5d-432d-b7c3-e1e9e35eac78-config-data\") pod \"nova-scheduler-0\" (UID: \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.672556 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34e77073-5b5d-432d-b7c3-e1e9e35eac78-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.672658 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29z5n\" (UniqueName: \"kubernetes.io/projected/34e77073-5b5d-432d-b7c3-e1e9e35eac78-kube-api-access-29z5n\") pod \"nova-scheduler-0\" (UID: \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.672795 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34e77073-5b5d-432d-b7c3-e1e9e35eac78-config-data\") pod \"nova-scheduler-0\" (UID: \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.678471 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34e77073-5b5d-432d-b7c3-e1e9e35eac78-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.678487 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34e77073-5b5d-432d-b7c3-e1e9e35eac78-config-data\") pod \"nova-scheduler-0\" (UID: \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.697013 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29z5n\" (UniqueName: 
\"kubernetes.io/projected/34e77073-5b5d-432d-b7c3-e1e9e35eac78-kube-api-access-29z5n\") pod \"nova-scheduler-0\" (UID: \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\") " pod="openstack/nova-scheduler-0" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.742805 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="754e2277-f84a-46a9-b7d8-789d955fe259" path="/var/lib/kubelet/pods/754e2277-f84a-46a9-b7d8-789d955fe259/volumes" Jan 05 23:24:56 crc kubenswrapper[4910]: I0105 23:24:56.798068 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:24:57 crc kubenswrapper[4910]: I0105 23:24:57.075845 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 05 23:24:57 crc kubenswrapper[4910]: I0105 23:24:57.076237 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 05 23:24:57 crc kubenswrapper[4910]: I0105 23:24:57.295446 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:24:57 crc kubenswrapper[4910]: W0105 23:24:57.308616 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34e77073_5b5d_432d_b7c3_e1e9e35eac78.slice/crio-da4ee824d9028ed0d6246e80392ada8e64f1371509ba1bc9fa8b442d16f20fb5 WatchSource:0}: Error finding container da4ee824d9028ed0d6246e80392ada8e64f1371509ba1bc9fa8b442d16f20fb5: Status 404 returned error can't find the container with id da4ee824d9028ed0d6246e80392ada8e64f1371509ba1bc9fa8b442d16f20fb5 Jan 05 23:24:57 crc kubenswrapper[4910]: I0105 23:24:57.393506 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"34e77073-5b5d-432d-b7c3-e1e9e35eac78","Type":"ContainerStarted","Data":"da4ee824d9028ed0d6246e80392ada8e64f1371509ba1bc9fa8b442d16f20fb5"} Jan 05 23:24:58 crc kubenswrapper[4910]: I0105 23:24:58.411266 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"34e77073-5b5d-432d-b7c3-e1e9e35eac78","Type":"ContainerStarted","Data":"b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467"} Jan 05 23:24:58 crc kubenswrapper[4910]: I0105 23:24:58.442704 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.4426830170000002 podStartE2EDuration="2.442683017s" podCreationTimestamp="2026-01-05 23:24:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:24:58.437895859 +0000 UTC m=+5630.015393539" watchObservedRunningTime="2026-01-05 23:24:58.442683017 +0000 UTC m=+5630.020180697" Jan 05 23:24:59 crc kubenswrapper[4910]: I0105 23:24:59.746539 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.455568 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-4qctf"] Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.458741 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.462192 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.462431 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.550674 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-4qctf"] Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.565057 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-config-data\") pod \"nova-cell1-cell-mapping-4qctf\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.565215 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-scripts\") pod \"nova-cell1-cell-mapping-4qctf\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.565271 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4qctf\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.565311 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86r4r\" (UniqueName: \"kubernetes.io/projected/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-kube-api-access-86r4r\") pod \"nova-cell1-cell-mapping-4qctf\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.667244 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86r4r\" (UniqueName: \"kubernetes.io/projected/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-kube-api-access-86r4r\") pod \"nova-cell1-cell-mapping-4qctf\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.667312 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-config-data\") pod \"nova-cell1-cell-mapping-4qctf\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.667393 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-scripts\") pod \"nova-cell1-cell-mapping-4qctf\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.667441 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4qctf\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.676268 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4qctf\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.677590 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-config-data\") pod \"nova-cell1-cell-mapping-4qctf\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.706831 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86r4r\" (UniqueName: \"kubernetes.io/projected/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-kube-api-access-86r4r\") pod \"nova-cell1-cell-mapping-4qctf\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.710803 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-scripts\") pod \"nova-cell1-cell-mapping-4qctf\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:00 crc kubenswrapper[4910]: I0105 23:25:00.808809 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:01 crc kubenswrapper[4910]: I0105 23:25:01.256271 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-4qctf"] Jan 05 23:25:01 crc kubenswrapper[4910]: I0105 23:25:01.449895 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4qctf" event={"ID":"8ef66051-8f95-4f1c-96dc-608d8a5edcfa","Type":"ContainerStarted","Data":"64a87a2f45ae5ca5114aeae8b7ec1b25e67732092d27cbdcd31462f3c447a638"} Jan 05 23:25:01 crc kubenswrapper[4910]: I0105 23:25:01.798938 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 05 23:25:02 crc kubenswrapper[4910]: I0105 23:25:02.068385 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 05 23:25:02 crc kubenswrapper[4910]: I0105 23:25:02.068844 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 05 23:25:02 crc kubenswrapper[4910]: I0105 23:25:02.069003 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 05 23:25:02 crc kubenswrapper[4910]: I0105 23:25:02.069097 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 05 23:25:02 crc kubenswrapper[4910]: I0105 23:25:02.460308 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4qctf" event={"ID":"8ef66051-8f95-4f1c-96dc-608d8a5edcfa","Type":"ContainerStarted","Data":"1f6e25575c530d8344e8c2a4b8060e2ca007f1d88ef98d44f63e4c14c871dcad"} Jan 05 23:25:02 crc kubenswrapper[4910]: I0105 23:25:02.488299 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-4qctf" podStartSLOduration=2.488270748 podStartE2EDuration="2.488270748s" podCreationTimestamp="2026-01-05 23:25:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:25:02.483548721 +0000 UTC m=+5634.061046381" watchObservedRunningTime="2026-01-05 23:25:02.488270748 +0000 UTC m=+5634.065768418" Jan 05 23:25:03 crc kubenswrapper[4910]: I0105 23:25:03.234443 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="09fc0e9f-42fa-4177-aa14-680c36d4b679" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.72:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 23:25:03 crc kubenswrapper[4910]: I0105 23:25:03.234629 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="09fc0e9f-42fa-4177-aa14-680c36d4b679" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.72:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 23:25:03 crc kubenswrapper[4910]: I0105 23:25:03.234634 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="0ffce34a-e594-439d-a15e-b3740ebdbab8" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.71:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 23:25:03 crc kubenswrapper[4910]: I0105 23:25:03.234789 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" 
podUID="0ffce34a-e594-439d-a15e-b3740ebdbab8" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.71:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 23:25:06 crc kubenswrapper[4910]: I0105 23:25:06.506113 4910 generic.go:334] "Generic (PLEG): container finished" podID="8ef66051-8f95-4f1c-96dc-608d8a5edcfa" containerID="1f6e25575c530d8344e8c2a4b8060e2ca007f1d88ef98d44f63e4c14c871dcad" exitCode=0 Jan 05 23:25:06 crc kubenswrapper[4910]: I0105 23:25:06.506225 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4qctf" event={"ID":"8ef66051-8f95-4f1c-96dc-608d8a5edcfa","Type":"ContainerDied","Data":"1f6e25575c530d8344e8c2a4b8060e2ca007f1d88ef98d44f63e4c14c871dcad"} Jan 05 23:25:06 crc kubenswrapper[4910]: I0105 23:25:06.800442 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 05 23:25:06 crc kubenswrapper[4910]: I0105 23:25:06.853650 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 05 23:25:07 crc kubenswrapper[4910]: I0105 23:25:07.586742 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 05 23:25:07 crc kubenswrapper[4910]: I0105 23:25:07.930087 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.021515 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86r4r\" (UniqueName: \"kubernetes.io/projected/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-kube-api-access-86r4r\") pod \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.021798 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-scripts\") pod \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.021873 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-combined-ca-bundle\") pod \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.021938 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-config-data\") pod \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\" (UID: \"8ef66051-8f95-4f1c-96dc-608d8a5edcfa\") " Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.031179 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-scripts" (OuterVolumeSpecName: "scripts") pod "8ef66051-8f95-4f1c-96dc-608d8a5edcfa" (UID: "8ef66051-8f95-4f1c-96dc-608d8a5edcfa"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.038454 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-kube-api-access-86r4r" (OuterVolumeSpecName: "kube-api-access-86r4r") pod "8ef66051-8f95-4f1c-96dc-608d8a5edcfa" (UID: "8ef66051-8f95-4f1c-96dc-608d8a5edcfa"). InnerVolumeSpecName "kube-api-access-86r4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.057441 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-config-data" (OuterVolumeSpecName: "config-data") pod "8ef66051-8f95-4f1c-96dc-608d8a5edcfa" (UID: "8ef66051-8f95-4f1c-96dc-608d8a5edcfa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.094328 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8ef66051-8f95-4f1c-96dc-608d8a5edcfa" (UID: "8ef66051-8f95-4f1c-96dc-608d8a5edcfa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.124406 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.124458 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.124471 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.124484 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86r4r\" (UniqueName: \"kubernetes.io/projected/8ef66051-8f95-4f1c-96dc-608d8a5edcfa-kube-api-access-86r4r\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.532572 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4qctf" Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.533235 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4qctf" event={"ID":"8ef66051-8f95-4f1c-96dc-608d8a5edcfa","Type":"ContainerDied","Data":"64a87a2f45ae5ca5114aeae8b7ec1b25e67732092d27cbdcd31462f3c447a638"} Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.533285 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64a87a2f45ae5ca5114aeae8b7ec1b25e67732092d27cbdcd31462f3c447a638" Jan 05 23:25:08 crc kubenswrapper[4910]: E0105 23:25:08.747349 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ef66051_8f95_4f1c_96dc_608d8a5edcfa.slice\": RecentStats: unable to find data in memory cache]" Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.823382 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.840348 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.840634 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="09fc0e9f-42fa-4177-aa14-680c36d4b679" containerName="nova-api-log" containerID="cri-o://da3f0d7a8ed1eea2cb193652991a51d98bc426bd93c7c77347f5a114ec10d95a" gracePeriod=30 Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.841166 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="09fc0e9f-42fa-4177-aa14-680c36d4b679" containerName="nova-api-api" containerID="cri-o://d13b6fbaadca358a5dcbbb25ade3d32d3e64cb0b633bbc40f067a4a1f063d2e3" gracePeriod=30 Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.855290 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.855710 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0ffce34a-e594-439d-a15e-b3740ebdbab8" containerName="nova-metadata-log" containerID="cri-o://e1d87310adfcc58ff0dbcce33fb401ead26831bbb19cb431ba43ec72aacee30b" gracePeriod=30 Jan 05 23:25:08 crc kubenswrapper[4910]: I0105 23:25:08.856478 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0ffce34a-e594-439d-a15e-b3740ebdbab8" containerName="nova-metadata-metadata" containerID="cri-o://056759bb1a9d7538f78f95f0831254528d6a3437e98e4b981a204b193f028d59" gracePeriod=30 Jan 05 23:25:09 crc kubenswrapper[4910]: I0105 23:25:09.543292 4910 generic.go:334] "Generic (PLEG): container finished" podID="0ffce34a-e594-439d-a15e-b3740ebdbab8" containerID="e1d87310adfcc58ff0dbcce33fb401ead26831bbb19cb431ba43ec72aacee30b" exitCode=143 Jan 05 23:25:09 crc kubenswrapper[4910]: I0105 23:25:09.543367 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0ffce34a-e594-439d-a15e-b3740ebdbab8","Type":"ContainerDied","Data":"e1d87310adfcc58ff0dbcce33fb401ead26831bbb19cb431ba43ec72aacee30b"} Jan 05 23:25:09 crc kubenswrapper[4910]: I0105 23:25:09.545168 4910 generic.go:334] "Generic (PLEG): container finished" podID="09fc0e9f-42fa-4177-aa14-680c36d4b679" 
containerID="da3f0d7a8ed1eea2cb193652991a51d98bc426bd93c7c77347f5a114ec10d95a" exitCode=143 Jan 05 23:25:09 crc kubenswrapper[4910]: I0105 23:25:09.545365 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="34e77073-5b5d-432d-b7c3-e1e9e35eac78" containerName="nova-scheduler-scheduler" containerID="cri-o://b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467" gracePeriod=30 Jan 05 23:25:09 crc kubenswrapper[4910]: I0105 23:25:09.545678 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"09fc0e9f-42fa-4177-aa14-680c36d4b679","Type":"ContainerDied","Data":"da3f0d7a8ed1eea2cb193652991a51d98bc426bd93c7c77347f5a114ec10d95a"} Jan 05 23:25:11 crc kubenswrapper[4910]: E0105 23:25:11.802348 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 23:25:11 crc kubenswrapper[4910]: E0105 23:25:11.805188 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 23:25:11 crc kubenswrapper[4910]: E0105 23:25:11.807343 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 23:25:11 crc kubenswrapper[4910]: E0105 23:25:11.807471 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="34e77073-5b5d-432d-b7c3-e1e9e35eac78" containerName="nova-scheduler-scheduler" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.556255 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.595424 4910 generic.go:334] "Generic (PLEG): container finished" podID="09fc0e9f-42fa-4177-aa14-680c36d4b679" containerID="d13b6fbaadca358a5dcbbb25ade3d32d3e64cb0b633bbc40f067a4a1f063d2e3" exitCode=0 Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.595499 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"09fc0e9f-42fa-4177-aa14-680c36d4b679","Type":"ContainerDied","Data":"d13b6fbaadca358a5dcbbb25ade3d32d3e64cb0b633bbc40f067a4a1f063d2e3"} Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.598520 4910 generic.go:334] "Generic (PLEG): container finished" podID="0ffce34a-e594-439d-a15e-b3740ebdbab8" containerID="056759bb1a9d7538f78f95f0831254528d6a3437e98e4b981a204b193f028d59" exitCode=0 Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.598565 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0ffce34a-e594-439d-a15e-b3740ebdbab8","Type":"ContainerDied","Data":"056759bb1a9d7538f78f95f0831254528d6a3437e98e4b981a204b193f028d59"} Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.598596 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0ffce34a-e594-439d-a15e-b3740ebdbab8","Type":"ContainerDied","Data":"77bc7e0d4c107c2b1ff85ae9aa6379f69b9fedfcaa40060dc37f8d59ca3ec43d"} Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.598614 4910 scope.go:117] "RemoveContainer" containerID="056759bb1a9d7538f78f95f0831254528d6a3437e98e4b981a204b193f028d59" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.598822 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.631686 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.642443 4910 scope.go:117] "RemoveContainer" containerID="e1d87310adfcc58ff0dbcce33fb401ead26831bbb19cb431ba43ec72aacee30b" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.669605 4910 scope.go:117] "RemoveContainer" containerID="056759bb1a9d7538f78f95f0831254528d6a3437e98e4b981a204b193f028d59" Jan 05 23:25:12 crc kubenswrapper[4910]: E0105 23:25:12.670333 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"056759bb1a9d7538f78f95f0831254528d6a3437e98e4b981a204b193f028d59\": container with ID starting with 056759bb1a9d7538f78f95f0831254528d6a3437e98e4b981a204b193f028d59 not found: ID does not exist" containerID="056759bb1a9d7538f78f95f0831254528d6a3437e98e4b981a204b193f028d59" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.670365 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"056759bb1a9d7538f78f95f0831254528d6a3437e98e4b981a204b193f028d59"} err="failed to get container status \"056759bb1a9d7538f78f95f0831254528d6a3437e98e4b981a204b193f028d59\": rpc error: code = NotFound desc = could not find container \"056759bb1a9d7538f78f95f0831254528d6a3437e98e4b981a204b193f028d59\": container with ID starting with 056759bb1a9d7538f78f95f0831254528d6a3437e98e4b981a204b193f028d59 not found: ID does not exist" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.670388 4910 scope.go:117] "RemoveContainer" containerID="e1d87310adfcc58ff0dbcce33fb401ead26831bbb19cb431ba43ec72aacee30b" Jan 05 23:25:12 crc kubenswrapper[4910]: E0105 23:25:12.670868 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1d87310adfcc58ff0dbcce33fb401ead26831bbb19cb431ba43ec72aacee30b\": container with ID starting with e1d87310adfcc58ff0dbcce33fb401ead26831bbb19cb431ba43ec72aacee30b not found: ID does not exist" containerID="e1d87310adfcc58ff0dbcce33fb401ead26831bbb19cb431ba43ec72aacee30b" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.670891 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1d87310adfcc58ff0dbcce33fb401ead26831bbb19cb431ba43ec72aacee30b"} err="failed to get container status \"e1d87310adfcc58ff0dbcce33fb401ead26831bbb19cb431ba43ec72aacee30b\": rpc error: code = NotFound desc = could not find container \"e1d87310adfcc58ff0dbcce33fb401ead26831bbb19cb431ba43ec72aacee30b\": container with ID starting with e1d87310adfcc58ff0dbcce33fb401ead26831bbb19cb431ba43ec72aacee30b not found: ID does not exist" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.711634 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ffce34a-e594-439d-a15e-b3740ebdbab8-combined-ca-bundle\") pod \"0ffce34a-e594-439d-a15e-b3740ebdbab8\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.711759 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zx94\" (UniqueName: \"kubernetes.io/projected/0ffce34a-e594-439d-a15e-b3740ebdbab8-kube-api-access-9zx94\") pod \"0ffce34a-e594-439d-a15e-b3740ebdbab8\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.711890 4910 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ffce34a-e594-439d-a15e-b3740ebdbab8-logs\") pod \"0ffce34a-e594-439d-a15e-b3740ebdbab8\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.712406 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ffce34a-e594-439d-a15e-b3740ebdbab8-logs" (OuterVolumeSpecName: "logs") pod "0ffce34a-e594-439d-a15e-b3740ebdbab8" (UID: "0ffce34a-e594-439d-a15e-b3740ebdbab8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.712513 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ffce34a-e594-439d-a15e-b3740ebdbab8-config-data\") pod \"0ffce34a-e594-439d-a15e-b3740ebdbab8\" (UID: \"0ffce34a-e594-439d-a15e-b3740ebdbab8\") " Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.713549 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ffce34a-e594-439d-a15e-b3740ebdbab8-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.717144 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ffce34a-e594-439d-a15e-b3740ebdbab8-kube-api-access-9zx94" (OuterVolumeSpecName: "kube-api-access-9zx94") pod "0ffce34a-e594-439d-a15e-b3740ebdbab8" (UID: "0ffce34a-e594-439d-a15e-b3740ebdbab8"). InnerVolumeSpecName "kube-api-access-9zx94". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.735173 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ffce34a-e594-439d-a15e-b3740ebdbab8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0ffce34a-e594-439d-a15e-b3740ebdbab8" (UID: "0ffce34a-e594-439d-a15e-b3740ebdbab8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.743870 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ffce34a-e594-439d-a15e-b3740ebdbab8-config-data" (OuterVolumeSpecName: "config-data") pod "0ffce34a-e594-439d-a15e-b3740ebdbab8" (UID: "0ffce34a-e594-439d-a15e-b3740ebdbab8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.815305 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kkm8t\" (UniqueName: \"kubernetes.io/projected/09fc0e9f-42fa-4177-aa14-680c36d4b679-kube-api-access-kkm8t\") pod \"09fc0e9f-42fa-4177-aa14-680c36d4b679\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.815513 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09fc0e9f-42fa-4177-aa14-680c36d4b679-config-data\") pod \"09fc0e9f-42fa-4177-aa14-680c36d4b679\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.815792 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09fc0e9f-42fa-4177-aa14-680c36d4b679-combined-ca-bundle\") pod \"09fc0e9f-42fa-4177-aa14-680c36d4b679\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.815877 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09fc0e9f-42fa-4177-aa14-680c36d4b679-logs\") pod \"09fc0e9f-42fa-4177-aa14-680c36d4b679\" (UID: \"09fc0e9f-42fa-4177-aa14-680c36d4b679\") " Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.816650 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ffce34a-e594-439d-a15e-b3740ebdbab8-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.816690 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ffce34a-e594-439d-a15e-b3740ebdbab8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.816712 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zx94\" (UniqueName: \"kubernetes.io/projected/0ffce34a-e594-439d-a15e-b3740ebdbab8-kube-api-access-9zx94\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.816764 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09fc0e9f-42fa-4177-aa14-680c36d4b679-logs" (OuterVolumeSpecName: "logs") pod "09fc0e9f-42fa-4177-aa14-680c36d4b679" (UID: "09fc0e9f-42fa-4177-aa14-680c36d4b679"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.818158 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09fc0e9f-42fa-4177-aa14-680c36d4b679-kube-api-access-kkm8t" (OuterVolumeSpecName: "kube-api-access-kkm8t") pod "09fc0e9f-42fa-4177-aa14-680c36d4b679" (UID: "09fc0e9f-42fa-4177-aa14-680c36d4b679"). InnerVolumeSpecName "kube-api-access-kkm8t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.841324 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09fc0e9f-42fa-4177-aa14-680c36d4b679-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09fc0e9f-42fa-4177-aa14-680c36d4b679" (UID: "09fc0e9f-42fa-4177-aa14-680c36d4b679"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.844030 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09fc0e9f-42fa-4177-aa14-680c36d4b679-config-data" (OuterVolumeSpecName: "config-data") pod "09fc0e9f-42fa-4177-aa14-680c36d4b679" (UID: "09fc0e9f-42fa-4177-aa14-680c36d4b679"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.918308 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09fc0e9f-42fa-4177-aa14-680c36d4b679-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.918352 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09fc0e9f-42fa-4177-aa14-680c36d4b679-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.918371 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09fc0e9f-42fa-4177-aa14-680c36d4b679-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.918385 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kkm8t\" (UniqueName: \"kubernetes.io/projected/09fc0e9f-42fa-4177-aa14-680c36d4b679-kube-api-access-kkm8t\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.950176 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:25:12 crc kubenswrapper[4910]: I0105 23:25:12.993391 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.006851 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:25:13 crc kubenswrapper[4910]: E0105 23:25:13.007347 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09fc0e9f-42fa-4177-aa14-680c36d4b679" containerName="nova-api-log" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.007375 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="09fc0e9f-42fa-4177-aa14-680c36d4b679" containerName="nova-api-log" Jan 05 23:25:13 crc kubenswrapper[4910]: E0105 23:25:13.007392 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09fc0e9f-42fa-4177-aa14-680c36d4b679" containerName="nova-api-api" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.007400 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="09fc0e9f-42fa-4177-aa14-680c36d4b679" containerName="nova-api-api" Jan 05 23:25:13 crc kubenswrapper[4910]: E0105 23:25:13.007421 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ffce34a-e594-439d-a15e-b3740ebdbab8" containerName="nova-metadata-metadata" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.007429 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ffce34a-e594-439d-a15e-b3740ebdbab8" containerName="nova-metadata-metadata" Jan 05 23:25:13 crc kubenswrapper[4910]: E0105 23:25:13.007441 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ef66051-8f95-4f1c-96dc-608d8a5edcfa" containerName="nova-manage" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.007448 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ef66051-8f95-4f1c-96dc-608d8a5edcfa" 
containerName="nova-manage" Jan 05 23:25:13 crc kubenswrapper[4910]: E0105 23:25:13.007470 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ffce34a-e594-439d-a15e-b3740ebdbab8" containerName="nova-metadata-log" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.007478 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ffce34a-e594-439d-a15e-b3740ebdbab8" containerName="nova-metadata-log" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.007698 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="09fc0e9f-42fa-4177-aa14-680c36d4b679" containerName="nova-api-log" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.007718 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ffce34a-e594-439d-a15e-b3740ebdbab8" containerName="nova-metadata-metadata" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.007726 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ef66051-8f95-4f1c-96dc-608d8a5edcfa" containerName="nova-manage" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.007738 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="09fc0e9f-42fa-4177-aa14-680c36d4b679" containerName="nova-api-api" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.007748 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ffce34a-e594-439d-a15e-b3740ebdbab8" containerName="nova-metadata-log" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.009093 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.012840 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.020941 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.121498 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5nsv\" (UniqueName: \"kubernetes.io/projected/02bc116b-3352-4eb9-9c44-1283f355e711-kube-api-access-v5nsv\") pod \"nova-metadata-0\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.121591 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02bc116b-3352-4eb9-9c44-1283f355e711-logs\") pod \"nova-metadata-0\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.122064 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02bc116b-3352-4eb9-9c44-1283f355e711-config-data\") pod \"nova-metadata-0\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.122466 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02bc116b-3352-4eb9-9c44-1283f355e711-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.223706 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02bc116b-3352-4eb9-9c44-1283f355e711-logs\") pod \"nova-metadata-0\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.223829 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02bc116b-3352-4eb9-9c44-1283f355e711-config-data\") pod \"nova-metadata-0\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.223882 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02bc116b-3352-4eb9-9c44-1283f355e711-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.223929 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5nsv\" (UniqueName: \"kubernetes.io/projected/02bc116b-3352-4eb9-9c44-1283f355e711-kube-api-access-v5nsv\") pod \"nova-metadata-0\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.224556 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02bc116b-3352-4eb9-9c44-1283f355e711-logs\") pod \"nova-metadata-0\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.231253 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02bc116b-3352-4eb9-9c44-1283f355e711-config-data\") pod \"nova-metadata-0\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.231351 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02bc116b-3352-4eb9-9c44-1283f355e711-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.252877 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5nsv\" (UniqueName: \"kubernetes.io/projected/02bc116b-3352-4eb9-9c44-1283f355e711-kube-api-access-v5nsv\") pod \"nova-metadata-0\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.342782 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.618405 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"09fc0e9f-42fa-4177-aa14-680c36d4b679","Type":"ContainerDied","Data":"9841bf57b6f7d7cd2ded2ebbf8ce564cb7b8f209dd273045031aca7476bdbe9c"} Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.618765 4910 scope.go:117] "RemoveContainer" containerID="d13b6fbaadca358a5dcbbb25ade3d32d3e64cb0b633bbc40f067a4a1f063d2e3" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.618468 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.648416 4910 scope.go:117] "RemoveContainer" containerID="da3f0d7a8ed1eea2cb193652991a51d98bc426bd93c7c77347f5a114ec10d95a" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.666225 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.674249 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.695865 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.697589 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.700728 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.708549 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.840095 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " pod="openstack/nova-api-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.841940 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-logs\") pod \"nova-api-0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " pod="openstack/nova-api-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.842204 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-config-data\") pod \"nova-api-0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " pod="openstack/nova-api-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.842431 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bsnp\" (UniqueName: \"kubernetes.io/projected/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-kube-api-access-5bsnp\") pod \"nova-api-0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " pod="openstack/nova-api-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.936223 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.944822 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-config-data\") pod \"nova-api-0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " pod="openstack/nova-api-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.944905 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bsnp\" (UniqueName: \"kubernetes.io/projected/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-kube-api-access-5bsnp\") pod \"nova-api-0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " pod="openstack/nova-api-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.945004 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " pod="openstack/nova-api-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.945074 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-logs\") pod \"nova-api-0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " pod="openstack/nova-api-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.945756 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-logs\") pod \"nova-api-0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " pod="openstack/nova-api-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.953347 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " pod="openstack/nova-api-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.959430 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-config-data\") pod \"nova-api-0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " pod="openstack/nova-api-0" Jan 05 23:25:13 crc kubenswrapper[4910]: I0105 23:25:13.965000 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bsnp\" (UniqueName: \"kubernetes.io/projected/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-kube-api-access-5bsnp\") pod \"nova-api-0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " pod="openstack/nova-api-0" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.017529 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.527030 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:25:14 crc kubenswrapper[4910]: W0105 23:25:14.541864 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee48c3fa_ab41_4e07_ba38_f7195ac868e0.slice/crio-f5ba95223b9b2348f65c9382f3ae84b1e615ff7dbc18678c6bd89eb3d712d73e WatchSource:0}: Error finding container f5ba95223b9b2348f65c9382f3ae84b1e615ff7dbc18678c6bd89eb3d712d73e: Status 404 returned error can't find the container with id f5ba95223b9b2348f65c9382f3ae84b1e615ff7dbc18678c6bd89eb3d712d73e Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.542563 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.649026 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"02bc116b-3352-4eb9-9c44-1283f355e711","Type":"ContainerStarted","Data":"9d9807d52b8f4547bf4a1eeef9201fc4433ff914522b4dc543a47126db851a66"} Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.649105 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"02bc116b-3352-4eb9-9c44-1283f355e711","Type":"ContainerStarted","Data":"3aa4cc65ec07b88424cae352c91af36b9d06998d7bb619a8af57d0ef39c45579"} Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.649156 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"02bc116b-3352-4eb9-9c44-1283f355e711","Type":"ContainerStarted","Data":"f63f22f7cd3d70ce75b125bf0acdc5497e805efde1b50bccb07bf29d03336789"} Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.658868 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34e77073-5b5d-432d-b7c3-e1e9e35eac78-config-data\") pod \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\" (UID: \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\") " Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.659023 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29z5n\" (UniqueName: \"kubernetes.io/projected/34e77073-5b5d-432d-b7c3-e1e9e35eac78-kube-api-access-29z5n\") pod \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\" (UID: \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\") " Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.659144 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34e77073-5b5d-432d-b7c3-e1e9e35eac78-combined-ca-bundle\") pod \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\" (UID: \"34e77073-5b5d-432d-b7c3-e1e9e35eac78\") " Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.659351 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee48c3fa-ab41-4e07-ba38-f7195ac868e0","Type":"ContainerStarted","Data":"f5ba95223b9b2348f65c9382f3ae84b1e615ff7dbc18678c6bd89eb3d712d73e"} Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.662964 4910 generic.go:334] "Generic (PLEG): container finished" podID="34e77073-5b5d-432d-b7c3-e1e9e35eac78" containerID="b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467" exitCode=0 Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.663004 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.663012 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"34e77073-5b5d-432d-b7c3-e1e9e35eac78","Type":"ContainerDied","Data":"b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467"} Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.663046 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"34e77073-5b5d-432d-b7c3-e1e9e35eac78","Type":"ContainerDied","Data":"da4ee824d9028ed0d6246e80392ada8e64f1371509ba1bc9fa8b442d16f20fb5"} Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.663066 4910 scope.go:117] "RemoveContainer" containerID="b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.667019 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34e77073-5b5d-432d-b7c3-e1e9e35eac78-kube-api-access-29z5n" (OuterVolumeSpecName: "kube-api-access-29z5n") pod "34e77073-5b5d-432d-b7c3-e1e9e35eac78" (UID: "34e77073-5b5d-432d-b7c3-e1e9e35eac78"). InnerVolumeSpecName "kube-api-access-29z5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.699863 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34e77073-5b5d-432d-b7c3-e1e9e35eac78-config-data" (OuterVolumeSpecName: "config-data") pod "34e77073-5b5d-432d-b7c3-e1e9e35eac78" (UID: "34e77073-5b5d-432d-b7c3-e1e9e35eac78"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.701031 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34e77073-5b5d-432d-b7c3-e1e9e35eac78-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34e77073-5b5d-432d-b7c3-e1e9e35eac78" (UID: "34e77073-5b5d-432d-b7c3-e1e9e35eac78"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.704765 4910 scope.go:117] "RemoveContainer" containerID="b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467" Jan 05 23:25:14 crc kubenswrapper[4910]: E0105 23:25:14.707365 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467\": container with ID starting with b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467 not found: ID does not exist" containerID="b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.707426 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467"} err="failed to get container status \"b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467\": rpc error: code = NotFound desc = could not find container \"b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467\": container with ID starting with b0c7e82bbf1cdda56199e145746e63030d7af9eed339156786c1863210abc467 not found: ID does not exist" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.732368 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09fc0e9f-42fa-4177-aa14-680c36d4b679" path="/var/lib/kubelet/pods/09fc0e9f-42fa-4177-aa14-680c36d4b679/volumes" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.732982 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ffce34a-e594-439d-a15e-b3740ebdbab8" path="/var/lib/kubelet/pods/0ffce34a-e594-439d-a15e-b3740ebdbab8/volumes" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.761043 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34e77073-5b5d-432d-b7c3-e1e9e35eac78-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.761079 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34e77073-5b5d-432d-b7c3-e1e9e35eac78-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.761092 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29z5n\" (UniqueName: \"kubernetes.io/projected/34e77073-5b5d-432d-b7c3-e1e9e35eac78-kube-api-access-29z5n\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.983541 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.983516927 podStartE2EDuration="2.983516927s" podCreationTimestamp="2026-01-05 23:25:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:25:14.66992907 +0000 UTC m=+5646.247426740" watchObservedRunningTime="2026-01-05 23:25:14.983516927 +0000 UTC m=+5646.561014607" Jan 05 23:25:14 crc kubenswrapper[4910]: I0105 23:25:14.993376 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.008861 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.019819 4910 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:25:15 crc kubenswrapper[4910]: E0105 23:25:15.020199 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34e77073-5b5d-432d-b7c3-e1e9e35eac78" containerName="nova-scheduler-scheduler" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.020218 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="34e77073-5b5d-432d-b7c3-e1e9e35eac78" containerName="nova-scheduler-scheduler" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.020401 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="34e77073-5b5d-432d-b7c3-e1e9e35eac78" containerName="nova-scheduler-scheduler" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.020972 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.023843 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.027757 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.167398 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-config-data\") pod \"nova-scheduler-0\" (UID: \"776877a4-7fc0-449a-add4-ffe7357c90e2\") " pod="openstack/nova-scheduler-0" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.167726 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"776877a4-7fc0-449a-add4-ffe7357c90e2\") " pod="openstack/nova-scheduler-0" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.167775 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hvnv\" (UniqueName: \"kubernetes.io/projected/776877a4-7fc0-449a-add4-ffe7357c90e2-kube-api-access-4hvnv\") pod \"nova-scheduler-0\" (UID: \"776877a4-7fc0-449a-add4-ffe7357c90e2\") " pod="openstack/nova-scheduler-0" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.269997 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"776877a4-7fc0-449a-add4-ffe7357c90e2\") " pod="openstack/nova-scheduler-0" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.270153 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hvnv\" (UniqueName: \"kubernetes.io/projected/776877a4-7fc0-449a-add4-ffe7357c90e2-kube-api-access-4hvnv\") pod \"nova-scheduler-0\" (UID: \"776877a4-7fc0-449a-add4-ffe7357c90e2\") " pod="openstack/nova-scheduler-0" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.270367 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-config-data\") pod \"nova-scheduler-0\" (UID: \"776877a4-7fc0-449a-add4-ffe7357c90e2\") " pod="openstack/nova-scheduler-0" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.274804 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-config-data\") pod \"nova-scheduler-0\" (UID: \"776877a4-7fc0-449a-add4-ffe7357c90e2\") " pod="openstack/nova-scheduler-0" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.275686 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"776877a4-7fc0-449a-add4-ffe7357c90e2\") " pod="openstack/nova-scheduler-0" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.297337 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hvnv\" (UniqueName: \"kubernetes.io/projected/776877a4-7fc0-449a-add4-ffe7357c90e2-kube-api-access-4hvnv\") pod \"nova-scheduler-0\" (UID: \"776877a4-7fc0-449a-add4-ffe7357c90e2\") " pod="openstack/nova-scheduler-0" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.341071 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.679082 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee48c3fa-ab41-4e07-ba38-f7195ac868e0","Type":"ContainerStarted","Data":"bd9a4af489bbe2a0a2ab3d2c62ed2fefda8e5d987d13b2320d5a2ec5bf7fd378"} Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.679399 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee48c3fa-ab41-4e07-ba38-f7195ac868e0","Type":"ContainerStarted","Data":"95e71d4b9582ac3cf5d578aa5f7be38bded636fdd1b47b5ab6be7afa28853efc"} Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.704471 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.704448023 podStartE2EDuration="2.704448023s" podCreationTimestamp="2026-01-05 23:25:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:25:15.697658825 +0000 UTC m=+5647.275156515" watchObservedRunningTime="2026-01-05 23:25:15.704448023 +0000 UTC m=+5647.281945693" Jan 05 23:25:15 crc kubenswrapper[4910]: I0105 23:25:15.792252 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:25:16 crc kubenswrapper[4910]: I0105 23:25:16.696234 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"776877a4-7fc0-449a-add4-ffe7357c90e2","Type":"ContainerStarted","Data":"d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9"} Jan 05 23:25:16 crc kubenswrapper[4910]: I0105 23:25:16.696729 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"776877a4-7fc0-449a-add4-ffe7357c90e2","Type":"ContainerStarted","Data":"3ebc72a4cd08b688dd86c725f996a755cced7f7528fa061ba4e5c5f8b5c3e2f7"} Jan 05 23:25:16 crc kubenswrapper[4910]: I0105 23:25:16.717935 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.717904905 podStartE2EDuration="2.717904905s" podCreationTimestamp="2026-01-05 23:25:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:25:16.713031144 +0000 UTC m=+5648.290528824" 
watchObservedRunningTime="2026-01-05 23:25:16.717904905 +0000 UTC m=+5648.295402615" Jan 05 23:25:16 crc kubenswrapper[4910]: I0105 23:25:16.737205 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34e77073-5b5d-432d-b7c3-e1e9e35eac78" path="/var/lib/kubelet/pods/34e77073-5b5d-432d-b7c3-e1e9e35eac78/volumes" Jan 05 23:25:18 crc kubenswrapper[4910]: I0105 23:25:18.344051 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 05 23:25:18 crc kubenswrapper[4910]: I0105 23:25:18.344481 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 05 23:25:20 crc kubenswrapper[4910]: I0105 23:25:20.341784 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 05 23:25:23 crc kubenswrapper[4910]: I0105 23:25:23.343586 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 05 23:25:23 crc kubenswrapper[4910]: I0105 23:25:23.344073 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 05 23:25:24 crc kubenswrapper[4910]: I0105 23:25:24.018330 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 05 23:25:24 crc kubenswrapper[4910]: I0105 23:25:24.018755 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 05 23:25:24 crc kubenswrapper[4910]: I0105 23:25:24.430364 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="02bc116b-3352-4eb9-9c44-1283f355e711" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.75:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 23:25:24 crc kubenswrapper[4910]: I0105 23:25:24.430475 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="02bc116b-3352-4eb9-9c44-1283f355e711" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.75:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 23:25:25 crc kubenswrapper[4910]: I0105 23:25:25.100449 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.76:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 23:25:25 crc kubenswrapper[4910]: I0105 23:25:25.100535 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.76:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 23:25:25 crc kubenswrapper[4910]: I0105 23:25:25.341551 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 05 23:25:25 crc kubenswrapper[4910]: I0105 23:25:25.392166 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 05 23:25:25 crc kubenswrapper[4910]: I0105 23:25:25.834741 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 05 23:25:33 crc kubenswrapper[4910]: I0105 23:25:33.347266 4910 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 05 23:25:33 crc kubenswrapper[4910]: I0105 23:25:33.348004 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 05 23:25:33 crc kubenswrapper[4910]: I0105 23:25:33.352764 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 05 23:25:33 crc kubenswrapper[4910]: I0105 23:25:33.354047 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 05 23:25:34 crc kubenswrapper[4910]: I0105 23:25:34.024385 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 05 23:25:34 crc kubenswrapper[4910]: I0105 23:25:34.024933 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 05 23:25:34 crc kubenswrapper[4910]: I0105 23:25:34.025239 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 05 23:25:34 crc kubenswrapper[4910]: I0105 23:25:34.031571 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 05 23:25:34 crc kubenswrapper[4910]: I0105 23:25:34.922716 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 05 23:25:34 crc kubenswrapper[4910]: I0105 23:25:34.934627 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.173197 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7cb799b995-t6jp4"] Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.174720 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.200280 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-ovsdbserver-sb\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.200347 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-config\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.200374 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-dns-svc\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.200427 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mk287\" (UniqueName: \"kubernetes.io/projected/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-kube-api-access-mk287\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.200447 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-ovsdbserver-nb\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.205338 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb799b995-t6jp4"] Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.302512 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-ovsdbserver-sb\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.302588 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-config\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.302626 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-dns-svc\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.302684 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-mk287\" (UniqueName: \"kubernetes.io/projected/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-kube-api-access-mk287\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.302711 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-ovsdbserver-nb\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.303650 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-ovsdbserver-sb\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.303679 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-config\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.303727 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-dns-svc\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.303943 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-ovsdbserver-nb\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.329245 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mk287\" (UniqueName: \"kubernetes.io/projected/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-kube-api-access-mk287\") pod \"dnsmasq-dns-7cb799b995-t6jp4\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:35 crc kubenswrapper[4910]: I0105 23:25:35.505099 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:36 crc kubenswrapper[4910]: I0105 23:25:36.119145 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb799b995-t6jp4"] Jan 05 23:25:36 crc kubenswrapper[4910]: I0105 23:25:36.944002 4910 generic.go:334] "Generic (PLEG): container finished" podID="65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" containerID="b0022a7e5e47042e1d6aebdfcbc68be824acfb80594513a9b59584d994fb52f8" exitCode=0 Jan 05 23:25:36 crc kubenswrapper[4910]: I0105 23:25:36.947429 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" event={"ID":"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5","Type":"ContainerDied","Data":"b0022a7e5e47042e1d6aebdfcbc68be824acfb80594513a9b59584d994fb52f8"} Jan 05 23:25:36 crc kubenswrapper[4910]: I0105 23:25:36.947492 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" event={"ID":"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5","Type":"ContainerStarted","Data":"78ad9f84bddd6d7a51421fdc2d4cf8d7e909435b82170c7ad81d424fbacff5b3"} Jan 05 23:25:37 crc kubenswrapper[4910]: I0105 23:25:37.960323 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" event={"ID":"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5","Type":"ContainerStarted","Data":"c6300d99f48a9335ee14fcad9efc5a13e03c934c32cbb7708f24bca4ff283396"} Jan 05 23:25:37 crc kubenswrapper[4910]: I0105 23:25:37.960665 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:37 crc kubenswrapper[4910]: I0105 23:25:37.989015 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" podStartSLOduration=2.988986533 podStartE2EDuration="2.988986533s" podCreationTimestamp="2026-01-05 23:25:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:25:37.986612685 +0000 UTC m=+5669.564110395" watchObservedRunningTime="2026-01-05 23:25:37.988986533 +0000 UTC m=+5669.566484223" Jan 05 23:25:45 crc kubenswrapper[4910]: I0105 23:25:45.507592 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:25:45 crc kubenswrapper[4910]: I0105 23:25:45.640001 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b95997f7-6qm88"] Jan 05 23:25:45 crc kubenswrapper[4910]: I0105 23:25:45.641589 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b95997f7-6qm88" podUID="a29badab-486f-44b9-a355-de373ae072a4" containerName="dnsmasq-dns" containerID="cri-o://25934dd8362b09843a7146cfbcbd0441a4e425d546607e1f36315f7bc87f94d2" gracePeriod=10 Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.050760 4910 generic.go:334] "Generic (PLEG): container finished" podID="a29badab-486f-44b9-a355-de373ae072a4" containerID="25934dd8362b09843a7146cfbcbd0441a4e425d546607e1f36315f7bc87f94d2" exitCode=0 Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.050855 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b95997f7-6qm88" event={"ID":"a29badab-486f-44b9-a355-de373ae072a4","Type":"ContainerDied","Data":"25934dd8362b09843a7146cfbcbd0441a4e425d546607e1f36315f7bc87f94d2"} Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.214936 4910 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.345939 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ts4fn\" (UniqueName: \"kubernetes.io/projected/a29badab-486f-44b9-a355-de373ae072a4-kube-api-access-ts4fn\") pod \"a29badab-486f-44b9-a355-de373ae072a4\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.346053 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-ovsdbserver-nb\") pod \"a29badab-486f-44b9-a355-de373ae072a4\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.346100 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-config\") pod \"a29badab-486f-44b9-a355-de373ae072a4\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.346235 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-ovsdbserver-sb\") pod \"a29badab-486f-44b9-a355-de373ae072a4\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.346253 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-dns-svc\") pod \"a29badab-486f-44b9-a355-de373ae072a4\" (UID: \"a29badab-486f-44b9-a355-de373ae072a4\") " Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.389366 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a29badab-486f-44b9-a355-de373ae072a4-kube-api-access-ts4fn" (OuterVolumeSpecName: "kube-api-access-ts4fn") pod "a29badab-486f-44b9-a355-de373ae072a4" (UID: "a29badab-486f-44b9-a355-de373ae072a4"). InnerVolumeSpecName "kube-api-access-ts4fn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.449332 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ts4fn\" (UniqueName: \"kubernetes.io/projected/a29badab-486f-44b9-a355-de373ae072a4-kube-api-access-ts4fn\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.453006 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a29badab-486f-44b9-a355-de373ae072a4" (UID: "a29badab-486f-44b9-a355-de373ae072a4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.470221 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-config" (OuterVolumeSpecName: "config") pod "a29badab-486f-44b9-a355-de373ae072a4" (UID: "a29badab-486f-44b9-a355-de373ae072a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.496654 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a29badab-486f-44b9-a355-de373ae072a4" (UID: "a29badab-486f-44b9-a355-de373ae072a4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.502700 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a29badab-486f-44b9-a355-de373ae072a4" (UID: "a29badab-486f-44b9-a355-de373ae072a4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.551113 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.551192 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.551206 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:46 crc kubenswrapper[4910]: I0105 23:25:46.551218 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a29badab-486f-44b9-a355-de373ae072a4-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:47 crc kubenswrapper[4910]: I0105 23:25:47.065911 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b95997f7-6qm88" event={"ID":"a29badab-486f-44b9-a355-de373ae072a4","Type":"ContainerDied","Data":"77eba28f3c294f6f5866ad0ac67e040e6c71a0edab3a47fbd36efba32a61d54b"} Jan 05 23:25:47 crc kubenswrapper[4910]: I0105 23:25:47.066232 4910 scope.go:117] "RemoveContainer" containerID="25934dd8362b09843a7146cfbcbd0441a4e425d546607e1f36315f7bc87f94d2" Jan 05 23:25:47 crc kubenswrapper[4910]: I0105 23:25:47.066057 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b95997f7-6qm88" Jan 05 23:25:47 crc kubenswrapper[4910]: I0105 23:25:47.098780 4910 scope.go:117] "RemoveContainer" containerID="e53e8ecdd20b5b155e16238fa027bc41386e1802b7caf5ebd2cbffc4c073b806" Jan 05 23:25:47 crc kubenswrapper[4910]: I0105 23:25:47.101413 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b95997f7-6qm88"] Jan 05 23:25:47 crc kubenswrapper[4910]: I0105 23:25:47.109872 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b95997f7-6qm88"] Jan 05 23:25:48 crc kubenswrapper[4910]: I0105 23:25:48.738908 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a29badab-486f-44b9-a355-de373ae072a4" path="/var/lib/kubelet/pods/a29badab-486f-44b9-a355-de373ae072a4/volumes" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.709592 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-85jzg"] Jan 05 23:25:49 crc kubenswrapper[4910]: E0105 23:25:49.710489 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a29badab-486f-44b9-a355-de373ae072a4" containerName="dnsmasq-dns" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.710506 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a29badab-486f-44b9-a355-de373ae072a4" containerName="dnsmasq-dns" Jan 05 23:25:49 crc kubenswrapper[4910]: E0105 23:25:49.710531 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a29badab-486f-44b9-a355-de373ae072a4" containerName="init" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.710539 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a29badab-486f-44b9-a355-de373ae072a4" containerName="init" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.710710 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a29badab-486f-44b9-a355-de373ae072a4" containerName="dnsmasq-dns" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.711865 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-85jzg" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.750786 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-85jzg"] Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.818200 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27qzq\" (UniqueName: \"kubernetes.io/projected/f9225c95-afc5-491e-8b18-0cb08272e8ae-kube-api-access-27qzq\") pod \"cinder-db-create-85jzg\" (UID: \"f9225c95-afc5-491e-8b18-0cb08272e8ae\") " pod="openstack/cinder-db-create-85jzg" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.818306 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9225c95-afc5-491e-8b18-0cb08272e8ae-operator-scripts\") pod \"cinder-db-create-85jzg\" (UID: \"f9225c95-afc5-491e-8b18-0cb08272e8ae\") " pod="openstack/cinder-db-create-85jzg" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.821439 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-b44a-account-create-update-8k545"] Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.823172 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-b44a-account-create-update-8k545" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.827678 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.829555 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-b44a-account-create-update-8k545"] Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.919690 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23d3522b-b3e1-43f4-91ee-3617ee3b5a15-operator-scripts\") pod \"cinder-b44a-account-create-update-8k545\" (UID: \"23d3522b-b3e1-43f4-91ee-3617ee3b5a15\") " pod="openstack/cinder-b44a-account-create-update-8k545" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.919766 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5s97w\" (UniqueName: \"kubernetes.io/projected/23d3522b-b3e1-43f4-91ee-3617ee3b5a15-kube-api-access-5s97w\") pod \"cinder-b44a-account-create-update-8k545\" (UID: \"23d3522b-b3e1-43f4-91ee-3617ee3b5a15\") " pod="openstack/cinder-b44a-account-create-update-8k545" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.919882 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27qzq\" (UniqueName: \"kubernetes.io/projected/f9225c95-afc5-491e-8b18-0cb08272e8ae-kube-api-access-27qzq\") pod \"cinder-db-create-85jzg\" (UID: \"f9225c95-afc5-491e-8b18-0cb08272e8ae\") " pod="openstack/cinder-db-create-85jzg" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.919930 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9225c95-afc5-491e-8b18-0cb08272e8ae-operator-scripts\") pod \"cinder-db-create-85jzg\" (UID: \"f9225c95-afc5-491e-8b18-0cb08272e8ae\") " pod="openstack/cinder-db-create-85jzg" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.921311 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9225c95-afc5-491e-8b18-0cb08272e8ae-operator-scripts\") pod \"cinder-db-create-85jzg\" (UID: \"f9225c95-afc5-491e-8b18-0cb08272e8ae\") " pod="openstack/cinder-db-create-85jzg" Jan 05 23:25:49 crc kubenswrapper[4910]: I0105 23:25:49.946003 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27qzq\" (UniqueName: \"kubernetes.io/projected/f9225c95-afc5-491e-8b18-0cb08272e8ae-kube-api-access-27qzq\") pod \"cinder-db-create-85jzg\" (UID: \"f9225c95-afc5-491e-8b18-0cb08272e8ae\") " pod="openstack/cinder-db-create-85jzg" Jan 05 23:25:50 crc kubenswrapper[4910]: I0105 23:25:50.022032 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23d3522b-b3e1-43f4-91ee-3617ee3b5a15-operator-scripts\") pod \"cinder-b44a-account-create-update-8k545\" (UID: \"23d3522b-b3e1-43f4-91ee-3617ee3b5a15\") " pod="openstack/cinder-b44a-account-create-update-8k545" Jan 05 23:25:50 crc kubenswrapper[4910]: I0105 23:25:50.022405 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5s97w\" (UniqueName: \"kubernetes.io/projected/23d3522b-b3e1-43f4-91ee-3617ee3b5a15-kube-api-access-5s97w\") pod 
\"cinder-b44a-account-create-update-8k545\" (UID: \"23d3522b-b3e1-43f4-91ee-3617ee3b5a15\") " pod="openstack/cinder-b44a-account-create-update-8k545" Jan 05 23:25:50 crc kubenswrapper[4910]: I0105 23:25:50.022896 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23d3522b-b3e1-43f4-91ee-3617ee3b5a15-operator-scripts\") pod \"cinder-b44a-account-create-update-8k545\" (UID: \"23d3522b-b3e1-43f4-91ee-3617ee3b5a15\") " pod="openstack/cinder-b44a-account-create-update-8k545" Jan 05 23:25:50 crc kubenswrapper[4910]: I0105 23:25:50.042304 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-85jzg" Jan 05 23:25:50 crc kubenswrapper[4910]: I0105 23:25:50.047789 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5s97w\" (UniqueName: \"kubernetes.io/projected/23d3522b-b3e1-43f4-91ee-3617ee3b5a15-kube-api-access-5s97w\") pod \"cinder-b44a-account-create-update-8k545\" (UID: \"23d3522b-b3e1-43f4-91ee-3617ee3b5a15\") " pod="openstack/cinder-b44a-account-create-update-8k545" Jan 05 23:25:50 crc kubenswrapper[4910]: I0105 23:25:50.148666 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-b44a-account-create-update-8k545" Jan 05 23:25:50 crc kubenswrapper[4910]: W0105 23:25:50.631377 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf9225c95_afc5_491e_8b18_0cb08272e8ae.slice/crio-02ad113c7b3e25cac1f911de1539a5e53447ca828d530597eb7c7f8ba7ef6795 WatchSource:0}: Error finding container 02ad113c7b3e25cac1f911de1539a5e53447ca828d530597eb7c7f8ba7ef6795: Status 404 returned error can't find the container with id 02ad113c7b3e25cac1f911de1539a5e53447ca828d530597eb7c7f8ba7ef6795 Jan 05 23:25:50 crc kubenswrapper[4910]: I0105 23:25:50.639398 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-85jzg"] Jan 05 23:25:50 crc kubenswrapper[4910]: I0105 23:25:50.708359 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-b44a-account-create-update-8k545"] Jan 05 23:25:51 crc kubenswrapper[4910]: I0105 23:25:51.120581 4910 generic.go:334] "Generic (PLEG): container finished" podID="f9225c95-afc5-491e-8b18-0cb08272e8ae" containerID="ae8b83f5aa12df97c5b77ab0a4a4ac77e9ed811358667ee2f2457941015acc11" exitCode=0 Jan 05 23:25:51 crc kubenswrapper[4910]: I0105 23:25:51.120702 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-85jzg" event={"ID":"f9225c95-afc5-491e-8b18-0cb08272e8ae","Type":"ContainerDied","Data":"ae8b83f5aa12df97c5b77ab0a4a4ac77e9ed811358667ee2f2457941015acc11"} Jan 05 23:25:51 crc kubenswrapper[4910]: I0105 23:25:51.121075 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-85jzg" event={"ID":"f9225c95-afc5-491e-8b18-0cb08272e8ae","Type":"ContainerStarted","Data":"02ad113c7b3e25cac1f911de1539a5e53447ca828d530597eb7c7f8ba7ef6795"} Jan 05 23:25:51 crc kubenswrapper[4910]: I0105 23:25:51.122771 4910 generic.go:334] "Generic (PLEG): container finished" podID="23d3522b-b3e1-43f4-91ee-3617ee3b5a15" containerID="81f685f932e5b8ee2f68f44a7008394ac43664d536429818e11fa1588128ed62" exitCode=0 Jan 05 23:25:51 crc kubenswrapper[4910]: I0105 23:25:51.122834 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-b44a-account-create-update-8k545" 
event={"ID":"23d3522b-b3e1-43f4-91ee-3617ee3b5a15","Type":"ContainerDied","Data":"81f685f932e5b8ee2f68f44a7008394ac43664d536429818e11fa1588128ed62"} Jan 05 23:25:51 crc kubenswrapper[4910]: I0105 23:25:51.122895 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-b44a-account-create-update-8k545" event={"ID":"23d3522b-b3e1-43f4-91ee-3617ee3b5a15","Type":"ContainerStarted","Data":"89ee666b5ca53ea82818226a280d8cc7b5b518b3d5dc57feada2025442e4c005"} Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.664617 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-85jzg" Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.671460 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-b44a-account-create-update-8k545" Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.783379 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5s97w\" (UniqueName: \"kubernetes.io/projected/23d3522b-b3e1-43f4-91ee-3617ee3b5a15-kube-api-access-5s97w\") pod \"23d3522b-b3e1-43f4-91ee-3617ee3b5a15\" (UID: \"23d3522b-b3e1-43f4-91ee-3617ee3b5a15\") " Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.783506 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9225c95-afc5-491e-8b18-0cb08272e8ae-operator-scripts\") pod \"f9225c95-afc5-491e-8b18-0cb08272e8ae\" (UID: \"f9225c95-afc5-491e-8b18-0cb08272e8ae\") " Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.783578 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23d3522b-b3e1-43f4-91ee-3617ee3b5a15-operator-scripts\") pod \"23d3522b-b3e1-43f4-91ee-3617ee3b5a15\" (UID: \"23d3522b-b3e1-43f4-91ee-3617ee3b5a15\") " Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.783703 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27qzq\" (UniqueName: \"kubernetes.io/projected/f9225c95-afc5-491e-8b18-0cb08272e8ae-kube-api-access-27qzq\") pod \"f9225c95-afc5-491e-8b18-0cb08272e8ae\" (UID: \"f9225c95-afc5-491e-8b18-0cb08272e8ae\") " Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.785046 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9225c95-afc5-491e-8b18-0cb08272e8ae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f9225c95-afc5-491e-8b18-0cb08272e8ae" (UID: "f9225c95-afc5-491e-8b18-0cb08272e8ae"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.785323 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23d3522b-b3e1-43f4-91ee-3617ee3b5a15-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "23d3522b-b3e1-43f4-91ee-3617ee3b5a15" (UID: "23d3522b-b3e1-43f4-91ee-3617ee3b5a15"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.795294 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9225c95-afc5-491e-8b18-0cb08272e8ae-kube-api-access-27qzq" (OuterVolumeSpecName: "kube-api-access-27qzq") pod "f9225c95-afc5-491e-8b18-0cb08272e8ae" (UID: "f9225c95-afc5-491e-8b18-0cb08272e8ae"). InnerVolumeSpecName "kube-api-access-27qzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.795347 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23d3522b-b3e1-43f4-91ee-3617ee3b5a15-kube-api-access-5s97w" (OuterVolumeSpecName: "kube-api-access-5s97w") pod "23d3522b-b3e1-43f4-91ee-3617ee3b5a15" (UID: "23d3522b-b3e1-43f4-91ee-3617ee3b5a15"). InnerVolumeSpecName "kube-api-access-5s97w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.885875 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5s97w\" (UniqueName: \"kubernetes.io/projected/23d3522b-b3e1-43f4-91ee-3617ee3b5a15-kube-api-access-5s97w\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.885931 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9225c95-afc5-491e-8b18-0cb08272e8ae-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.885940 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23d3522b-b3e1-43f4-91ee-3617ee3b5a15-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:52 crc kubenswrapper[4910]: I0105 23:25:52.885948 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27qzq\" (UniqueName: \"kubernetes.io/projected/f9225c95-afc5-491e-8b18-0cb08272e8ae-kube-api-access-27qzq\") on node \"crc\" DevicePath \"\"" Jan 05 23:25:53 crc kubenswrapper[4910]: I0105 23:25:53.152891 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-b44a-account-create-update-8k545" Jan 05 23:25:53 crc kubenswrapper[4910]: I0105 23:25:53.152875 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-b44a-account-create-update-8k545" event={"ID":"23d3522b-b3e1-43f4-91ee-3617ee3b5a15","Type":"ContainerDied","Data":"89ee666b5ca53ea82818226a280d8cc7b5b518b3d5dc57feada2025442e4c005"} Jan 05 23:25:53 crc kubenswrapper[4910]: I0105 23:25:53.153070 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="89ee666b5ca53ea82818226a280d8cc7b5b518b3d5dc57feada2025442e4c005" Jan 05 23:25:53 crc kubenswrapper[4910]: I0105 23:25:53.155706 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-85jzg" event={"ID":"f9225c95-afc5-491e-8b18-0cb08272e8ae","Type":"ContainerDied","Data":"02ad113c7b3e25cac1f911de1539a5e53447ca828d530597eb7c7f8ba7ef6795"} Jan 05 23:25:53 crc kubenswrapper[4910]: I0105 23:25:53.155768 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02ad113c7b3e25cac1f911de1539a5e53447ca828d530597eb7c7f8ba7ef6795" Jan 05 23:25:53 crc kubenswrapper[4910]: I0105 23:25:53.155834 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-85jzg" Jan 05 23:25:54 crc kubenswrapper[4910]: I0105 23:25:54.998692 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-8hflm"] Jan 05 23:25:54 crc kubenswrapper[4910]: E0105 23:25:54.999571 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9225c95-afc5-491e-8b18-0cb08272e8ae" containerName="mariadb-database-create" Jan 05 23:25:54 crc kubenswrapper[4910]: I0105 23:25:54.999618 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9225c95-afc5-491e-8b18-0cb08272e8ae" containerName="mariadb-database-create" Jan 05 23:25:54 crc kubenswrapper[4910]: E0105 23:25:54.999667 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23d3522b-b3e1-43f4-91ee-3617ee3b5a15" containerName="mariadb-account-create-update" Jan 05 23:25:54 crc kubenswrapper[4910]: I0105 23:25:54.999681 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="23d3522b-b3e1-43f4-91ee-3617ee3b5a15" containerName="mariadb-account-create-update" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.000002 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="23d3522b-b3e1-43f4-91ee-3617ee3b5a15" containerName="mariadb-account-create-update" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.000041 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9225c95-afc5-491e-8b18-0cb08272e8ae" containerName="mariadb-database-create" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.001187 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.009172 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.009399 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.009443 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-4tqpn" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.011177 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-8hflm"] Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.131340 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-scripts\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.131385 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-db-sync-config-data\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.131597 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-config-data\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.131825 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pchb7\" (UniqueName: \"kubernetes.io/projected/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-kube-api-access-pchb7\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.131856 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-etc-machine-id\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.131916 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-combined-ca-bundle\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.234064 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pchb7\" (UniqueName: \"kubernetes.io/projected/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-kube-api-access-pchb7\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.234125 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-etc-machine-id\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.234154 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-combined-ca-bundle\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.234191 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-scripts\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.234213 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-db-sync-config-data\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.234273 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-config-data\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.235297 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-etc-machine-id\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.240053 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-scripts\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.240141 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-db-sync-config-data\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.240556 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-config-data\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.246490 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-combined-ca-bundle\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.269653 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pchb7\" (UniqueName: \"kubernetes.io/projected/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-kube-api-access-pchb7\") pod \"cinder-db-sync-8hflm\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.338815 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-8hflm" Jan 05 23:25:55 crc kubenswrapper[4910]: I0105 23:25:55.860443 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-8hflm"] Jan 05 23:25:55 crc kubenswrapper[4910]: W0105 23:25:55.867387 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20e5ef58_99f5_44ad_bcbd_310ab1052ce2.slice/crio-0761c681c44276193c63db18809c5983d19f5715a8e314b44e8cb966515819c4 WatchSource:0}: Error finding container 0761c681c44276193c63db18809c5983d19f5715a8e314b44e8cb966515819c4: Status 404 returned error can't find the container with id 0761c681c44276193c63db18809c5983d19f5715a8e314b44e8cb966515819c4 Jan 05 23:25:56 crc kubenswrapper[4910]: I0105 23:25:56.181762 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8hflm" event={"ID":"20e5ef58-99f5-44ad-bcbd-310ab1052ce2","Type":"ContainerStarted","Data":"0761c681c44276193c63db18809c5983d19f5715a8e314b44e8cb966515819c4"} Jan 05 23:25:57 crc kubenswrapper[4910]: I0105 23:25:57.202209 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8hflm" event={"ID":"20e5ef58-99f5-44ad-bcbd-310ab1052ce2","Type":"ContainerStarted","Data":"89e008f163364a429cd3cadace1419f216793c201556df0af9a91dfa21e5469e"} Jan 05 23:25:57 crc kubenswrapper[4910]: I0105 23:25:57.232464 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-8hflm" podStartSLOduration=3.232424269 podStartE2EDuration="3.232424269s" podCreationTimestamp="2026-01-05 23:25:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:25:57.227687412 +0000 UTC m=+5688.805185092" watchObservedRunningTime="2026-01-05 23:25:57.232424269 +0000 UTC m=+5688.809921979" Jan 05 23:25:59 crc kubenswrapper[4910]: I0105 23:25:59.226924 4910 generic.go:334] "Generic (PLEG): container finished" podID="20e5ef58-99f5-44ad-bcbd-310ab1052ce2" containerID="89e008f163364a429cd3cadace1419f216793c201556df0af9a91dfa21e5469e" exitCode=0 Jan 05 23:25:59 crc kubenswrapper[4910]: I0105 23:25:59.227034 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8hflm" event={"ID":"20e5ef58-99f5-44ad-bcbd-310ab1052ce2","Type":"ContainerDied","Data":"89e008f163364a429cd3cadace1419f216793c201556df0af9a91dfa21e5469e"} Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.716827 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-8hflm" Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.856435 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-db-sync-config-data\") pod \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.856580 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-config-data\") pod \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.856773 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-scripts\") pod \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.856958 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-etc-machine-id\") pod \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.857018 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-combined-ca-bundle\") pod \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.857116 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pchb7\" (UniqueName: \"kubernetes.io/projected/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-kube-api-access-pchb7\") pod \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\" (UID: \"20e5ef58-99f5-44ad-bcbd-310ab1052ce2\") " Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.857364 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "20e5ef58-99f5-44ad-bcbd-310ab1052ce2" (UID: "20e5ef58-99f5-44ad-bcbd-310ab1052ce2"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.859056 4910 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.863153 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "20e5ef58-99f5-44ad-bcbd-310ab1052ce2" (UID: "20e5ef58-99f5-44ad-bcbd-310ab1052ce2"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.868490 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-scripts" (OuterVolumeSpecName: "scripts") pod "20e5ef58-99f5-44ad-bcbd-310ab1052ce2" (UID: "20e5ef58-99f5-44ad-bcbd-310ab1052ce2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.868502 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-kube-api-access-pchb7" (OuterVolumeSpecName: "kube-api-access-pchb7") pod "20e5ef58-99f5-44ad-bcbd-310ab1052ce2" (UID: "20e5ef58-99f5-44ad-bcbd-310ab1052ce2"). InnerVolumeSpecName "kube-api-access-pchb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.907546 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "20e5ef58-99f5-44ad-bcbd-310ab1052ce2" (UID: "20e5ef58-99f5-44ad-bcbd-310ab1052ce2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.915954 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-config-data" (OuterVolumeSpecName: "config-data") pod "20e5ef58-99f5-44ad-bcbd-310ab1052ce2" (UID: "20e5ef58-99f5-44ad-bcbd-310ab1052ce2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.960686 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.960723 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.960735 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.960746 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pchb7\" (UniqueName: \"kubernetes.io/projected/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-kube-api-access-pchb7\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:00 crc kubenswrapper[4910]: I0105 23:26:00.960755 4910 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/20e5ef58-99f5-44ad-bcbd-310ab1052ce2-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.256014 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8hflm" event={"ID":"20e5ef58-99f5-44ad-bcbd-310ab1052ce2","Type":"ContainerDied","Data":"0761c681c44276193c63db18809c5983d19f5715a8e314b44e8cb966515819c4"} Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.256062 4910 pod_container_deletor.go:80] 
"Container not found in pod's containers" containerID="0761c681c44276193c63db18809c5983d19f5715a8e314b44e8cb966515819c4" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.256168 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-8hflm" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.711791 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6cb64bf69-8dbkn"] Jan 05 23:26:01 crc kubenswrapper[4910]: E0105 23:26:01.712469 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20e5ef58-99f5-44ad-bcbd-310ab1052ce2" containerName="cinder-db-sync" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.712483 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="20e5ef58-99f5-44ad-bcbd-310ab1052ce2" containerName="cinder-db-sync" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.712697 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="20e5ef58-99f5-44ad-bcbd-310ab1052ce2" containerName="cinder-db-sync" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.714284 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.731956 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cb64bf69-8dbkn"] Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.787444 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-config\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.787534 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmrx8\" (UniqueName: \"kubernetes.io/projected/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-kube-api-access-vmrx8\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.787581 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-dns-svc\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.787631 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-ovsdbserver-nb\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.787656 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-ovsdbserver-sb\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.889614 4910 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-dns-svc\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.889723 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-ovsdbserver-nb\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.889758 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-ovsdbserver-sb\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.889889 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-config\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.889949 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmrx8\" (UniqueName: \"kubernetes.io/projected/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-kube-api-access-vmrx8\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.891439 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-dns-svc\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.892015 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-ovsdbserver-nb\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.892559 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-ovsdbserver-sb\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.893085 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-config\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.916607 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.918662 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.926112 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.926469 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-4tqpn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.926622 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.926745 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.928726 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmrx8\" (UniqueName: \"kubernetes.io/projected/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-kube-api-access-vmrx8\") pod \"dnsmasq-dns-6cb64bf69-8dbkn\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.944883 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.993092 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-config-data-custom\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.993289 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-scripts\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.993341 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.993396 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd8216dd-10b4-41f8-bc8f-ef7437020264-logs\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.993433 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-config-data\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.993476 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cd8216dd-10b4-41f8-bc8f-ef7437020264-etc-machine-id\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:01 crc kubenswrapper[4910]: I0105 23:26:01.993524 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxsgh\" (UniqueName: \"kubernetes.io/projected/cd8216dd-10b4-41f8-bc8f-ef7437020264-kube-api-access-sxsgh\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.049541 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.095258 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-config-data-custom\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.095309 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-scripts\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.095348 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.095380 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd8216dd-10b4-41f8-bc8f-ef7437020264-logs\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.095405 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-config-data\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.095444 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cd8216dd-10b4-41f8-bc8f-ef7437020264-etc-machine-id\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.095468 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxsgh\" (UniqueName: \"kubernetes.io/projected/cd8216dd-10b4-41f8-bc8f-ef7437020264-kube-api-access-sxsgh\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.096363 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cd8216dd-10b4-41f8-bc8f-ef7437020264-etc-machine-id\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.096966 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/cd8216dd-10b4-41f8-bc8f-ef7437020264-logs\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.107980 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-config-data-custom\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.113189 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-config-data\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.113757 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.114731 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-scripts\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.139748 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxsgh\" (UniqueName: \"kubernetes.io/projected/cd8216dd-10b4-41f8-bc8f-ef7437020264-kube-api-access-sxsgh\") pod \"cinder-api-0\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.283937 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.646105 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cb64bf69-8dbkn"] Jan 05 23:26:02 crc kubenswrapper[4910]: I0105 23:26:02.844630 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 05 23:26:02 crc kubenswrapper[4910]: W0105 23:26:02.849174 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd8216dd_10b4_41f8_bc8f_ef7437020264.slice/crio-5878364884b3c4fec7564d8a1ed7b8e6122aed10fe0944baebfeddc4265715dc WatchSource:0}: Error finding container 5878364884b3c4fec7564d8a1ed7b8e6122aed10fe0944baebfeddc4265715dc: Status 404 returned error can't find the container with id 5878364884b3c4fec7564d8a1ed7b8e6122aed10fe0944baebfeddc4265715dc Jan 05 23:26:03 crc kubenswrapper[4910]: I0105 23:26:03.311318 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd8216dd-10b4-41f8-bc8f-ef7437020264","Type":"ContainerStarted","Data":"5878364884b3c4fec7564d8a1ed7b8e6122aed10fe0944baebfeddc4265715dc"} Jan 05 23:26:03 crc kubenswrapper[4910]: I0105 23:26:03.314968 4910 generic.go:334] "Generic (PLEG): container finished" podID="030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" containerID="f387a0d309f3dbadce8b4ac31158b07dd41a8bef7625f3938029eea9c9a41889" exitCode=0 Jan 05 23:26:03 crc kubenswrapper[4910]: I0105 23:26:03.315002 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" event={"ID":"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0","Type":"ContainerDied","Data":"f387a0d309f3dbadce8b4ac31158b07dd41a8bef7625f3938029eea9c9a41889"} Jan 05 23:26:03 crc kubenswrapper[4910]: I0105 23:26:03.315021 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" event={"ID":"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0","Type":"ContainerStarted","Data":"0fc546a02c24ae0ffe9ff7651fd1e3eeb800dc4556aa61d46fe2662b4623ef28"} Jan 05 23:26:04 crc kubenswrapper[4910]: I0105 23:26:04.359184 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" event={"ID":"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0","Type":"ContainerStarted","Data":"2089369f33fa2bba993962ac1a23b91cc59807065c237d34e497fb3ad6218ed6"} Jan 05 23:26:04 crc kubenswrapper[4910]: I0105 23:26:04.360041 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:04 crc kubenswrapper[4910]: I0105 23:26:04.367842 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd8216dd-10b4-41f8-bc8f-ef7437020264","Type":"ContainerStarted","Data":"35e9ed7907bc2a1dee5ec6488324b2493fec8f5acdb20739fa960009854624d1"} Jan 05 23:26:04 crc kubenswrapper[4910]: I0105 23:26:04.367910 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd8216dd-10b4-41f8-bc8f-ef7437020264","Type":"ContainerStarted","Data":"1416900f8247c2d8e0d8ae1e0e6034eb2594d0fd195bc3b5000a43dc46be1439"} Jan 05 23:26:04 crc kubenswrapper[4910]: I0105 23:26:04.368764 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 05 23:26:04 crc kubenswrapper[4910]: I0105 23:26:04.386728 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" podStartSLOduration=3.3867046849999998 
podStartE2EDuration="3.386704685s" podCreationTimestamp="2026-01-05 23:26:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:26:04.383623849 +0000 UTC m=+5695.961121529" watchObservedRunningTime="2026-01-05 23:26:04.386704685 +0000 UTC m=+5695.964202365" Jan 05 23:26:04 crc kubenswrapper[4910]: I0105 23:26:04.407188 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.4071343020000002 podStartE2EDuration="3.407134302s" podCreationTimestamp="2026-01-05 23:26:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:26:04.400942218 +0000 UTC m=+5695.978439888" watchObservedRunningTime="2026-01-05 23:26:04.407134302 +0000 UTC m=+5695.984631972" Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.052434 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.133711 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb799b995-t6jp4"] Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.134031 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" podUID="65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" containerName="dnsmasq-dns" containerID="cri-o://c6300d99f48a9335ee14fcad9efc5a13e03c934c32cbb7708f24bca4ff283396" gracePeriod=10 Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.462115 4910 generic.go:334] "Generic (PLEG): container finished" podID="65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" containerID="c6300d99f48a9335ee14fcad9efc5a13e03c934c32cbb7708f24bca4ff283396" exitCode=0 Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.462589 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" event={"ID":"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5","Type":"ContainerDied","Data":"c6300d99f48a9335ee14fcad9efc5a13e03c934c32cbb7708f24bca4ff283396"} Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.679454 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.853281 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-config\") pod \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.853368 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-dns-svc\") pod \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.853425 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mk287\" (UniqueName: \"kubernetes.io/projected/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-kube-api-access-mk287\") pod \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.853585 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-ovsdbserver-sb\") pod \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.853610 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-ovsdbserver-nb\") pod \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\" (UID: \"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5\") " Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.870435 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-kube-api-access-mk287" (OuterVolumeSpecName: "kube-api-access-mk287") pod "65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" (UID: "65ba7e68-ccfa-4b14-9d34-4b8e72f422b5"). InnerVolumeSpecName "kube-api-access-mk287". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.929169 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" (UID: "65ba7e68-ccfa-4b14-9d34-4b8e72f422b5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.942316 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" (UID: "65ba7e68-ccfa-4b14-9d34-4b8e72f422b5"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.957031 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mk287\" (UniqueName: \"kubernetes.io/projected/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-kube-api-access-mk287\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.957061 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.957070 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.970623 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" (UID: "65ba7e68-ccfa-4b14-9d34-4b8e72f422b5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:26:12 crc kubenswrapper[4910]: I0105 23:26:12.981906 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-config" (OuterVolumeSpecName: "config") pod "65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" (UID: "65ba7e68-ccfa-4b14-9d34-4b8e72f422b5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.058561 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.058815 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.475161 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" event={"ID":"65ba7e68-ccfa-4b14-9d34-4b8e72f422b5","Type":"ContainerDied","Data":"78ad9f84bddd6d7a51421fdc2d4cf8d7e909435b82170c7ad81d424fbacff5b3"} Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.475446 4910 scope.go:117] "RemoveContainer" containerID="c6300d99f48a9335ee14fcad9efc5a13e03c934c32cbb7708f24bca4ff283396" Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.475235 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb799b995-t6jp4" Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.505620 4910 scope.go:117] "RemoveContainer" containerID="b0022a7e5e47042e1d6aebdfcbc68be824acfb80594513a9b59584d994fb52f8" Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.520791 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb799b995-t6jp4"] Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.529817 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7cb799b995-t6jp4"] Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.908327 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.908984 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="776877a4-7fc0-449a-add4-ffe7357c90e2" containerName="nova-scheduler-scheduler" containerID="cri-o://d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9" gracePeriod=30 Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.921946 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.922237 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" containerName="nova-api-log" containerID="cri-o://95e71d4b9582ac3cf5d578aa5f7be38bded636fdd1b47b5ab6be7afa28853efc" gracePeriod=30 Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.922393 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" containerName="nova-api-api" containerID="cri-o://bd9a4af489bbe2a0a2ab3d2c62ed2fefda8e5d987d13b2320d5a2ec5bf7fd378" gracePeriod=30 Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.938193 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.938469 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="9875106d-cbf9-402b-b7c1-0c3d00445606" containerName="nova-cell0-conductor-conductor" containerID="cri-o://1312808f6d5b1a456e98d8336d8d347e1eea4c15ed28035eb42e1dd2f964718d" gracePeriod=30 Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.952689 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.952935 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="02bc116b-3352-4eb9-9c44-1283f355e711" containerName="nova-metadata-log" containerID="cri-o://3aa4cc65ec07b88424cae352c91af36b9d06998d7bb619a8af57d0ef39c45579" gracePeriod=30 Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.953290 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="02bc116b-3352-4eb9-9c44-1283f355e711" containerName="nova-metadata-metadata" containerID="cri-o://9d9807d52b8f4547bf4a1eeef9201fc4433ff914522b4dc543a47126db851a66" gracePeriod=30 Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.973320 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 23:26:13 crc kubenswrapper[4910]: I0105 23:26:13.973591 4910 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="81e1f339-37da-4c90-9f60-b4d369ea06a9" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://cfe91149aac13f39a2503a5238dc89d8dfe2c1d8223e4a89c27c0523eab2437a" gracePeriod=30 Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.320675 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.487815 4910 generic.go:334] "Generic (PLEG): container finished" podID="02bc116b-3352-4eb9-9c44-1283f355e711" containerID="3aa4cc65ec07b88424cae352c91af36b9d06998d7bb619a8af57d0ef39c45579" exitCode=143 Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.488175 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"02bc116b-3352-4eb9-9c44-1283f355e711","Type":"ContainerDied","Data":"3aa4cc65ec07b88424cae352c91af36b9d06998d7bb619a8af57d0ef39c45579"} Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.493044 4910 generic.go:334] "Generic (PLEG): container finished" podID="81e1f339-37da-4c90-9f60-b4d369ea06a9" containerID="cfe91149aac13f39a2503a5238dc89d8dfe2c1d8223e4a89c27c0523eab2437a" exitCode=0 Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.493119 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"81e1f339-37da-4c90-9f60-b4d369ea06a9","Type":"ContainerDied","Data":"cfe91149aac13f39a2503a5238dc89d8dfe2c1d8223e4a89c27c0523eab2437a"} Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.502215 4910 generic.go:334] "Generic (PLEG): container finished" podID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" containerID="95e71d4b9582ac3cf5d578aa5f7be38bded636fdd1b47b5ab6be7afa28853efc" exitCode=143 Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.502270 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee48c3fa-ab41-4e07-ba38-f7195ac868e0","Type":"ContainerDied","Data":"95e71d4b9582ac3cf5d578aa5f7be38bded636fdd1b47b5ab6be7afa28853efc"} Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.736500 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" path="/var/lib/kubelet/pods/65ba7e68-ccfa-4b14-9d34-4b8e72f422b5/volumes" Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.817745 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.892911 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81e1f339-37da-4c90-9f60-b4d369ea06a9-combined-ca-bundle\") pod \"81e1f339-37da-4c90-9f60-b4d369ea06a9\" (UID: \"81e1f339-37da-4c90-9f60-b4d369ea06a9\") " Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.893009 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81e1f339-37da-4c90-9f60-b4d369ea06a9-config-data\") pod \"81e1f339-37da-4c90-9f60-b4d369ea06a9\" (UID: \"81e1f339-37da-4c90-9f60-b4d369ea06a9\") " Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.893041 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbp6h\" (UniqueName: \"kubernetes.io/projected/81e1f339-37da-4c90-9f60-b4d369ea06a9-kube-api-access-dbp6h\") pod \"81e1f339-37da-4c90-9f60-b4d369ea06a9\" (UID: \"81e1f339-37da-4c90-9f60-b4d369ea06a9\") " Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.917213 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81e1f339-37da-4c90-9f60-b4d369ea06a9-kube-api-access-dbp6h" (OuterVolumeSpecName: "kube-api-access-dbp6h") pod "81e1f339-37da-4c90-9f60-b4d369ea06a9" (UID: "81e1f339-37da-4c90-9f60-b4d369ea06a9"). InnerVolumeSpecName "kube-api-access-dbp6h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.922660 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81e1f339-37da-4c90-9f60-b4d369ea06a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "81e1f339-37da-4c90-9f60-b4d369ea06a9" (UID: "81e1f339-37da-4c90-9f60-b4d369ea06a9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.928330 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81e1f339-37da-4c90-9f60-b4d369ea06a9-config-data" (OuterVolumeSpecName: "config-data") pod "81e1f339-37da-4c90-9f60-b4d369ea06a9" (UID: "81e1f339-37da-4c90-9f60-b4d369ea06a9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.994687 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81e1f339-37da-4c90-9f60-b4d369ea06a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.994723 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81e1f339-37da-4c90-9f60-b4d369ea06a9-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:14 crc kubenswrapper[4910]: I0105 23:26:14.994736 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbp6h\" (UniqueName: \"kubernetes.io/projected/81e1f339-37da-4c90-9f60-b4d369ea06a9-kube-api-access-dbp6h\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:15 crc kubenswrapper[4910]: E0105 23:26:15.344308 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 23:26:15 crc kubenswrapper[4910]: E0105 23:26:15.347652 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 23:26:15 crc kubenswrapper[4910]: E0105 23:26:15.349114 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 23:26:15 crc kubenswrapper[4910]: E0105 23:26:15.349214 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="776877a4-7fc0-449a-add4-ffe7357c90e2" containerName="nova-scheduler-scheduler" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.517190 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"81e1f339-37da-4c90-9f60-b4d369ea06a9","Type":"ContainerDied","Data":"362719a26f7c8fab1591acb150980888d712379620b35751c769c8234c677394"} Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.517302 4910 scope.go:117] "RemoveContainer" containerID="cfe91149aac13f39a2503a5238dc89d8dfe2c1d8223e4a89c27c0523eab2437a" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.517491 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.573477 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.640200 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.658798 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 23:26:15 crc kubenswrapper[4910]: E0105 23:26:15.659998 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" containerName="init" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.660027 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" containerName="init" Jan 05 23:26:15 crc kubenswrapper[4910]: E0105 23:26:15.660109 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81e1f339-37da-4c90-9f60-b4d369ea06a9" containerName="nova-cell1-novncproxy-novncproxy" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.661029 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="81e1f339-37da-4c90-9f60-b4d369ea06a9" containerName="nova-cell1-novncproxy-novncproxy" Jan 05 23:26:15 crc kubenswrapper[4910]: E0105 23:26:15.661090 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" containerName="dnsmasq-dns" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.661102 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" containerName="dnsmasq-dns" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.661747 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="81e1f339-37da-4c90-9f60-b4d369ea06a9" containerName="nova-cell1-novncproxy-novncproxy" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.661800 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="65ba7e68-ccfa-4b14-9d34-4b8e72f422b5" containerName="dnsmasq-dns" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.663319 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.688291 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.702518 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.816720 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd2984f0-6e89-4170-a405-553944d7aad2-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"cd2984f0-6e89-4170-a405-553944d7aad2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.816822 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd2984f0-6e89-4170-a405-553944d7aad2-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"cd2984f0-6e89-4170-a405-553944d7aad2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.816922 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lx7pk\" (UniqueName: \"kubernetes.io/projected/cd2984f0-6e89-4170-a405-553944d7aad2-kube-api-access-lx7pk\") pod \"nova-cell1-novncproxy-0\" (UID: \"cd2984f0-6e89-4170-a405-553944d7aad2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.918882 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd2984f0-6e89-4170-a405-553944d7aad2-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"cd2984f0-6e89-4170-a405-553944d7aad2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.918947 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd2984f0-6e89-4170-a405-553944d7aad2-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"cd2984f0-6e89-4170-a405-553944d7aad2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.919020 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lx7pk\" (UniqueName: \"kubernetes.io/projected/cd2984f0-6e89-4170-a405-553944d7aad2-kube-api-access-lx7pk\") pod \"nova-cell1-novncproxy-0\" (UID: \"cd2984f0-6e89-4170-a405-553944d7aad2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.928741 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd2984f0-6e89-4170-a405-553944d7aad2-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"cd2984f0-6e89-4170-a405-553944d7aad2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.931466 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd2984f0-6e89-4170-a405-553944d7aad2-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"cd2984f0-6e89-4170-a405-553944d7aad2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:15 crc kubenswrapper[4910]: I0105 23:26:15.934967 
4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx7pk\" (UniqueName: \"kubernetes.io/projected/cd2984f0-6e89-4170-a405-553944d7aad2-kube-api-access-lx7pk\") pod \"nova-cell1-novncproxy-0\" (UID: \"cd2984f0-6e89-4170-a405-553944d7aad2\") " pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.020442 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:16 crc kubenswrapper[4910]: E0105 23:26:16.394934 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1312808f6d5b1a456e98d8336d8d347e1eea4c15ed28035eb42e1dd2f964718d is running failed: container process not found" containerID="1312808f6d5b1a456e98d8336d8d347e1eea4c15ed28035eb42e1dd2f964718d" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 05 23:26:16 crc kubenswrapper[4910]: E0105 23:26:16.398840 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1312808f6d5b1a456e98d8336d8d347e1eea4c15ed28035eb42e1dd2f964718d is running failed: container process not found" containerID="1312808f6d5b1a456e98d8336d8d347e1eea4c15ed28035eb42e1dd2f964718d" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 05 23:26:16 crc kubenswrapper[4910]: E0105 23:26:16.400570 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1312808f6d5b1a456e98d8336d8d347e1eea4c15ed28035eb42e1dd2f964718d is running failed: container process not found" containerID="1312808f6d5b1a456e98d8336d8d347e1eea4c15ed28035eb42e1dd2f964718d" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 05 23:26:16 crc kubenswrapper[4910]: E0105 23:26:16.400700 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1312808f6d5b1a456e98d8336d8d347e1eea4c15ed28035eb42e1dd2f964718d is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="9875106d-cbf9-402b-b7c1-0c3d00445606" containerName="nova-cell0-conductor-conductor" Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.522690 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.528641 4910 generic.go:334] "Generic (PLEG): container finished" podID="9875106d-cbf9-402b-b7c1-0c3d00445606" containerID="1312808f6d5b1a456e98d8336d8d347e1eea4c15ed28035eb42e1dd2f964718d" exitCode=0 Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.528692 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"9875106d-cbf9-402b-b7c1-0c3d00445606","Type":"ContainerDied","Data":"1312808f6d5b1a456e98d8336d8d347e1eea4c15ed28035eb42e1dd2f964718d"} Jan 05 23:26:16 crc kubenswrapper[4910]: W0105 23:26:16.543309 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd2984f0_6e89_4170_a405_553944d7aad2.slice/crio-8bc07b10c6459aea23c50de8acd0b2635b44d6a91e1b5030c48257135280483a WatchSource:0}: Error finding container 8bc07b10c6459aea23c50de8acd0b2635b44d6a91e1b5030c48257135280483a: Status 404 returned error can't find the container with id 
8bc07b10c6459aea23c50de8acd0b2635b44d6a91e1b5030c48257135280483a Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.712796 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.757189 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81e1f339-37da-4c90-9f60-b4d369ea06a9" path="/var/lib/kubelet/pods/81e1f339-37da-4c90-9f60-b4d369ea06a9/volumes" Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.842827 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9875106d-cbf9-402b-b7c1-0c3d00445606-config-data\") pod \"9875106d-cbf9-402b-b7c1-0c3d00445606\" (UID: \"9875106d-cbf9-402b-b7c1-0c3d00445606\") " Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.842909 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vmvt\" (UniqueName: \"kubernetes.io/projected/9875106d-cbf9-402b-b7c1-0c3d00445606-kube-api-access-5vmvt\") pod \"9875106d-cbf9-402b-b7c1-0c3d00445606\" (UID: \"9875106d-cbf9-402b-b7c1-0c3d00445606\") " Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.843064 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9875106d-cbf9-402b-b7c1-0c3d00445606-combined-ca-bundle\") pod \"9875106d-cbf9-402b-b7c1-0c3d00445606\" (UID: \"9875106d-cbf9-402b-b7c1-0c3d00445606\") " Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.850361 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9875106d-cbf9-402b-b7c1-0c3d00445606-kube-api-access-5vmvt" (OuterVolumeSpecName: "kube-api-access-5vmvt") pod "9875106d-cbf9-402b-b7c1-0c3d00445606" (UID: "9875106d-cbf9-402b-b7c1-0c3d00445606"). InnerVolumeSpecName "kube-api-access-5vmvt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.872441 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9875106d-cbf9-402b-b7c1-0c3d00445606-config-data" (OuterVolumeSpecName: "config-data") pod "9875106d-cbf9-402b-b7c1-0c3d00445606" (UID: "9875106d-cbf9-402b-b7c1-0c3d00445606"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.879528 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9875106d-cbf9-402b-b7c1-0c3d00445606-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9875106d-cbf9-402b-b7c1-0c3d00445606" (UID: "9875106d-cbf9-402b-b7c1-0c3d00445606"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.946930 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9875106d-cbf9-402b-b7c1-0c3d00445606-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.946984 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vmvt\" (UniqueName: \"kubernetes.io/projected/9875106d-cbf9-402b-b7c1-0c3d00445606-kube-api-access-5vmvt\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:16 crc kubenswrapper[4910]: I0105 23:26:16.947001 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9875106d-cbf9-402b-b7c1-0c3d00445606-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.077371 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.76:8774/\": read tcp 10.217.0.2:56056->10.217.1.76:8774: read: connection reset by peer" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.077416 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.76:8774/\": read tcp 10.217.0.2:56044->10.217.1.76:8774: read: connection reset by peer" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.137653 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.137861 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="55cf337c-c5d7-48cb-a18f-8c926b8c77e1" containerName="nova-cell1-conductor-conductor" containerID="cri-o://9a0e9ef7374f1bd2d5f92b2f62ba2f4b630ee553ebbe4f5c8b1de8c0a545a91c" gracePeriod=30 Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.548217 4910 generic.go:334] "Generic (PLEG): container finished" podID="02bc116b-3352-4eb9-9c44-1283f355e711" containerID="9d9807d52b8f4547bf4a1eeef9201fc4433ff914522b4dc543a47126db851a66" exitCode=0 Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.548585 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"02bc116b-3352-4eb9-9c44-1283f355e711","Type":"ContainerDied","Data":"9d9807d52b8f4547bf4a1eeef9201fc4433ff914522b4dc543a47126db851a66"} Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.549678 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"9875106d-cbf9-402b-b7c1-0c3d00445606","Type":"ContainerDied","Data":"c9ffdda37517b1da6427794deebd7cf45deb559d1704f699a353655e2cf05609"} Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.549710 4910 scope.go:117] "RemoveContainer" containerID="1312808f6d5b1a456e98d8336d8d347e1eea4c15ed28035eb42e1dd2f964718d" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.549872 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.579668 4910 generic.go:334] "Generic (PLEG): container finished" podID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" containerID="bd9a4af489bbe2a0a2ab3d2c62ed2fefda8e5d987d13b2320d5a2ec5bf7fd378" exitCode=0 Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.579785 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee48c3fa-ab41-4e07-ba38-f7195ac868e0","Type":"ContainerDied","Data":"bd9a4af489bbe2a0a2ab3d2c62ed2fefda8e5d987d13b2320d5a2ec5bf7fd378"} Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.589419 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"cd2984f0-6e89-4170-a405-553944d7aad2","Type":"ContainerStarted","Data":"2574cf2ba2fc3cd263291390b864869f02f46db5223044986cccb723c60a465f"} Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.589475 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"cd2984f0-6e89-4170-a405-553944d7aad2","Type":"ContainerStarted","Data":"8bc07b10c6459aea23c50de8acd0b2635b44d6a91e1b5030c48257135280483a"} Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.615921 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.615900795 podStartE2EDuration="2.615900795s" podCreationTimestamp="2026-01-05 23:26:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:26:17.60441789 +0000 UTC m=+5709.181915560" watchObservedRunningTime="2026-01-05 23:26:17.615900795 +0000 UTC m=+5709.193398465" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.649842 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.664405 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.671077 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.688647 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.707472 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 23:26:17 crc kubenswrapper[4910]: E0105 23:26:17.707920 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" containerName="nova-api-log" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.707939 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" containerName="nova-api-log" Jan 05 23:26:17 crc kubenswrapper[4910]: E0105 23:26:17.707949 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" containerName="nova-api-api" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.707954 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" containerName="nova-api-api" Jan 05 23:26:17 crc kubenswrapper[4910]: E0105 23:26:17.707970 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9875106d-cbf9-402b-b7c1-0c3d00445606" containerName="nova-cell0-conductor-conductor" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.707976 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9875106d-cbf9-402b-b7c1-0c3d00445606" containerName="nova-cell0-conductor-conductor" Jan 05 23:26:17 crc kubenswrapper[4910]: E0105 23:26:17.707999 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02bc116b-3352-4eb9-9c44-1283f355e711" containerName="nova-metadata-log" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.708006 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="02bc116b-3352-4eb9-9c44-1283f355e711" containerName="nova-metadata-log" Jan 05 23:26:17 crc kubenswrapper[4910]: E0105 23:26:17.708020 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02bc116b-3352-4eb9-9c44-1283f355e711" containerName="nova-metadata-metadata" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.708025 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="02bc116b-3352-4eb9-9c44-1283f355e711" containerName="nova-metadata-metadata" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.708207 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="02bc116b-3352-4eb9-9c44-1283f355e711" containerName="nova-metadata-log" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.708217 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="02bc116b-3352-4eb9-9c44-1283f355e711" containerName="nova-metadata-metadata" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.708230 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" containerName="nova-api-log" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.708246 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="9875106d-cbf9-402b-b7c1-0c3d00445606" containerName="nova-cell0-conductor-conductor" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.708272 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" 
containerName="nova-api-api" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.709091 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.717644 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.736337 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.763516 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5nsv\" (UniqueName: \"kubernetes.io/projected/02bc116b-3352-4eb9-9c44-1283f355e711-kube-api-access-v5nsv\") pod \"02bc116b-3352-4eb9-9c44-1283f355e711\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.763984 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-combined-ca-bundle\") pod \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.764155 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02bc116b-3352-4eb9-9c44-1283f355e711-config-data\") pod \"02bc116b-3352-4eb9-9c44-1283f355e711\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.764231 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02bc116b-3352-4eb9-9c44-1283f355e711-logs\") pod \"02bc116b-3352-4eb9-9c44-1283f355e711\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.764262 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02bc116b-3352-4eb9-9c44-1283f355e711-combined-ca-bundle\") pod \"02bc116b-3352-4eb9-9c44-1283f355e711\" (UID: \"02bc116b-3352-4eb9-9c44-1283f355e711\") " Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.764421 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-config-data\") pod \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.764483 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-logs\") pod \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.764624 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bsnp\" (UniqueName: \"kubernetes.io/projected/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-kube-api-access-5bsnp\") pod \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\" (UID: \"ee48c3fa-ab41-4e07-ba38-f7195ac868e0\") " Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.767529 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/02bc116b-3352-4eb9-9c44-1283f355e711-logs" (OuterVolumeSpecName: "logs") pod "02bc116b-3352-4eb9-9c44-1283f355e711" (UID: "02bc116b-3352-4eb9-9c44-1283f355e711"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.768350 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-logs" (OuterVolumeSpecName: "logs") pod "ee48c3fa-ab41-4e07-ba38-f7195ac868e0" (UID: "ee48c3fa-ab41-4e07-ba38-f7195ac868e0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.768965 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02bc116b-3352-4eb9-9c44-1283f355e711-kube-api-access-v5nsv" (OuterVolumeSpecName: "kube-api-access-v5nsv") pod "02bc116b-3352-4eb9-9c44-1283f355e711" (UID: "02bc116b-3352-4eb9-9c44-1283f355e711"). InnerVolumeSpecName "kube-api-access-v5nsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.773755 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-kube-api-access-5bsnp" (OuterVolumeSpecName: "kube-api-access-5bsnp") pod "ee48c3fa-ab41-4e07-ba38-f7195ac868e0" (UID: "ee48c3fa-ab41-4e07-ba38-f7195ac868e0"). InnerVolumeSpecName "kube-api-access-5bsnp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.805476 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-config-data" (OuterVolumeSpecName: "config-data") pod "ee48c3fa-ab41-4e07-ba38-f7195ac868e0" (UID: "ee48c3fa-ab41-4e07-ba38-f7195ac868e0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.815463 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02bc116b-3352-4eb9-9c44-1283f355e711-config-data" (OuterVolumeSpecName: "config-data") pod "02bc116b-3352-4eb9-9c44-1283f355e711" (UID: "02bc116b-3352-4eb9-9c44-1283f355e711"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.815774 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02bc116b-3352-4eb9-9c44-1283f355e711-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "02bc116b-3352-4eb9-9c44-1283f355e711" (UID: "02bc116b-3352-4eb9-9c44-1283f355e711"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.821243 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee48c3fa-ab41-4e07-ba38-f7195ac868e0" (UID: "ee48c3fa-ab41-4e07-ba38-f7195ac868e0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.867247 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qdc4\" (UniqueName: \"kubernetes.io/projected/5d354431-26ed-479a-baa3-bd24ab0abc2a-kube-api-access-4qdc4\") pod \"nova-cell0-conductor-0\" (UID: \"5d354431-26ed-479a-baa3-bd24ab0abc2a\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.867438 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d354431-26ed-479a-baa3-bd24ab0abc2a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"5d354431-26ed-479a-baa3-bd24ab0abc2a\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.867884 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d354431-26ed-479a-baa3-bd24ab0abc2a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"5d354431-26ed-479a-baa3-bd24ab0abc2a\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.868573 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bsnp\" (UniqueName: \"kubernetes.io/projected/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-kube-api-access-5bsnp\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.868609 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5nsv\" (UniqueName: \"kubernetes.io/projected/02bc116b-3352-4eb9-9c44-1283f355e711-kube-api-access-v5nsv\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.868622 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.868634 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02bc116b-3352-4eb9-9c44-1283f355e711-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.868646 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02bc116b-3352-4eb9-9c44-1283f355e711-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.868655 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02bc116b-3352-4eb9-9c44-1283f355e711-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.868666 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.868676 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee48c3fa-ab41-4e07-ba38-f7195ac868e0-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.970214 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/5d354431-26ed-479a-baa3-bd24ab0abc2a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"5d354431-26ed-479a-baa3-bd24ab0abc2a\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.970331 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qdc4\" (UniqueName: \"kubernetes.io/projected/5d354431-26ed-479a-baa3-bd24ab0abc2a-kube-api-access-4qdc4\") pod \"nova-cell0-conductor-0\" (UID: \"5d354431-26ed-479a-baa3-bd24ab0abc2a\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.970381 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d354431-26ed-479a-baa3-bd24ab0abc2a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"5d354431-26ed-479a-baa3-bd24ab0abc2a\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.989027 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d354431-26ed-479a-baa3-bd24ab0abc2a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"5d354431-26ed-479a-baa3-bd24ab0abc2a\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.991768 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qdc4\" (UniqueName: \"kubernetes.io/projected/5d354431-26ed-479a-baa3-bd24ab0abc2a-kube-api-access-4qdc4\") pod \"nova-cell0-conductor-0\" (UID: \"5d354431-26ed-479a-baa3-bd24ab0abc2a\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:17 crc kubenswrapper[4910]: I0105 23:26:17.993340 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d354431-26ed-479a-baa3-bd24ab0abc2a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"5d354431-26ed-479a-baa3-bd24ab0abc2a\") " pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.039836 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.515747 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.606015 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee48c3fa-ab41-4e07-ba38-f7195ac868e0","Type":"ContainerDied","Data":"f5ba95223b9b2348f65c9382f3ae84b1e615ff7dbc18678c6bd89eb3d712d73e"} Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.606078 4910 scope.go:117] "RemoveContainer" containerID="bd9a4af489bbe2a0a2ab3d2c62ed2fefda8e5d987d13b2320d5a2ec5bf7fd378" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.606043 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.614871 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.615447 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"02bc116b-3352-4eb9-9c44-1283f355e711","Type":"ContainerDied","Data":"f63f22f7cd3d70ce75b125bf0acdc5497e805efde1b50bccb07bf29d03336789"} Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.619292 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"5d354431-26ed-479a-baa3-bd24ab0abc2a","Type":"ContainerStarted","Data":"29a8e1903aebbd80921689bfa44a87c2c923422d4b659ef966fc44d04b85edf7"} Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.659495 4910 scope.go:117] "RemoveContainer" containerID="95e71d4b9582ac3cf5d578aa5f7be38bded636fdd1b47b5ab6be7afa28853efc" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.663171 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.726845 4910 scope.go:117] "RemoveContainer" containerID="9d9807d52b8f4547bf4a1eeef9201fc4433ff914522b4dc543a47126db851a66" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.781984 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9875106d-cbf9-402b-b7c1-0c3d00445606" path="/var/lib/kubelet/pods/9875106d-cbf9-402b-b7c1-0c3d00445606/volumes" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.783570 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.790395 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.793395 4910 scope.go:117] "RemoveContainer" containerID="3aa4cc65ec07b88424cae352c91af36b9d06998d7bb619a8af57d0ef39c45579" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.800540 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.806841 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.808507 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.813383 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.818169 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.819718 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.823725 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.830468 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.841485 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.902257 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dm6x2\" (UniqueName: \"kubernetes.io/projected/b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3-kube-api-access-dm6x2\") pod \"nova-api-0\" (UID: \"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3\") " pod="openstack/nova-api-0" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.902332 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3\") " pod="openstack/nova-api-0" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.902578 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3-config-data\") pod \"nova-api-0\" (UID: \"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3\") " pod="openstack/nova-api-0" Jan 05 23:26:18 crc kubenswrapper[4910]: I0105 23:26:18.902964 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3-logs\") pod \"nova-api-0\" (UID: \"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3\") " pod="openstack/nova-api-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.005082 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6f7b35f-ff01-4fa9-b914-7735a6bf716e-logs\") pod \"nova-metadata-0\" (UID: \"d6f7b35f-ff01-4fa9-b914-7735a6bf716e\") " pod="openstack/nova-metadata-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.005475 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6f7b35f-ff01-4fa9-b914-7735a6bf716e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d6f7b35f-ff01-4fa9-b914-7735a6bf716e\") " pod="openstack/nova-metadata-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.005581 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3-logs\") pod \"nova-api-0\" (UID: \"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3\") " pod="openstack/nova-api-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.005642 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dm6x2\" (UniqueName: \"kubernetes.io/projected/b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3-kube-api-access-dm6x2\") pod \"nova-api-0\" (UID: \"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3\") " pod="openstack/nova-api-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.005701 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3\") " pod="openstack/nova-api-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.005772 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3-config-data\") pod \"nova-api-0\" (UID: \"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3\") " pod="openstack/nova-api-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.005838 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6f7b35f-ff01-4fa9-b914-7735a6bf716e-config-data\") pod \"nova-metadata-0\" (UID: \"d6f7b35f-ff01-4fa9-b914-7735a6bf716e\") " pod="openstack/nova-metadata-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.005875 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66rnl\" (UniqueName: \"kubernetes.io/projected/d6f7b35f-ff01-4fa9-b914-7735a6bf716e-kube-api-access-66rnl\") pod \"nova-metadata-0\" (UID: \"d6f7b35f-ff01-4fa9-b914-7735a6bf716e\") " pod="openstack/nova-metadata-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.006146 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3-logs\") pod \"nova-api-0\" (UID: \"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3\") " pod="openstack/nova-api-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.011664 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3-config-data\") pod \"nova-api-0\" (UID: \"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3\") " pod="openstack/nova-api-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.014906 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3\") " pod="openstack/nova-api-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.051733 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dm6x2\" (UniqueName: \"kubernetes.io/projected/b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3-kube-api-access-dm6x2\") pod \"nova-api-0\" (UID: \"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3\") " pod="openstack/nova-api-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.107698 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6f7b35f-ff01-4fa9-b914-7735a6bf716e-logs\") pod \"nova-metadata-0\" (UID: \"d6f7b35f-ff01-4fa9-b914-7735a6bf716e\") " pod="openstack/nova-metadata-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.107755 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6f7b35f-ff01-4fa9-b914-7735a6bf716e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d6f7b35f-ff01-4fa9-b914-7735a6bf716e\") " pod="openstack/nova-metadata-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.107866 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6f7b35f-ff01-4fa9-b914-7735a6bf716e-config-data\") pod \"nova-metadata-0\" (UID: \"d6f7b35f-ff01-4fa9-b914-7735a6bf716e\") " pod="openstack/nova-metadata-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.107891 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66rnl\" (UniqueName: \"kubernetes.io/projected/d6f7b35f-ff01-4fa9-b914-7735a6bf716e-kube-api-access-66rnl\") pod \"nova-metadata-0\" (UID: \"d6f7b35f-ff01-4fa9-b914-7735a6bf716e\") " pod="openstack/nova-metadata-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.108545 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6f7b35f-ff01-4fa9-b914-7735a6bf716e-logs\") pod \"nova-metadata-0\" (UID: \"d6f7b35f-ff01-4fa9-b914-7735a6bf716e\") " pod="openstack/nova-metadata-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.111059 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6f7b35f-ff01-4fa9-b914-7735a6bf716e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d6f7b35f-ff01-4fa9-b914-7735a6bf716e\") " pod="openstack/nova-metadata-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.117652 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6f7b35f-ff01-4fa9-b914-7735a6bf716e-config-data\") pod \"nova-metadata-0\" (UID: \"d6f7b35f-ff01-4fa9-b914-7735a6bf716e\") " pod="openstack/nova-metadata-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.138606 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66rnl\" (UniqueName: \"kubernetes.io/projected/d6f7b35f-ff01-4fa9-b914-7735a6bf716e-kube-api-access-66rnl\") pod \"nova-metadata-0\" (UID: \"d6f7b35f-ff01-4fa9-b914-7735a6bf716e\") " pod="openstack/nova-metadata-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.140582 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.148577 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.664490 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"5d354431-26ed-479a-baa3-bd24ab0abc2a","Type":"ContainerStarted","Data":"70e87ff24781f5af7f4851d18f302cb0a3cfd84d1c6623c089f3b0d85c255518"} Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.666611 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.668655 4910 generic.go:334] "Generic (PLEG): container finished" podID="55cf337c-c5d7-48cb-a18f-8c926b8c77e1" containerID="9a0e9ef7374f1bd2d5f92b2f62ba2f4b630ee553ebbe4f5c8b1de8c0a545a91c" exitCode=0 Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.668812 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"55cf337c-c5d7-48cb-a18f-8c926b8c77e1","Type":"ContainerDied","Data":"9a0e9ef7374f1bd2d5f92b2f62ba2f4b630ee553ebbe4f5c8b1de8c0a545a91c"} Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.688538 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.688517138 podStartE2EDuration="2.688517138s" podCreationTimestamp="2026-01-05 23:26:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:26:19.683605616 +0000 UTC m=+5711.261103286" watchObservedRunningTime="2026-01-05 23:26:19.688517138 +0000 UTC m=+5711.266014828" Jan 05 23:26:19 crc kubenswrapper[4910]: E0105 23:26:19.702223 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a0e9ef7374f1bd2d5f92b2f62ba2f4b630ee553ebbe4f5c8b1de8c0a545a91c is running failed: container process not found" containerID="9a0e9ef7374f1bd2d5f92b2f62ba2f4b630ee553ebbe4f5c8b1de8c0a545a91c" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 05 23:26:19 crc kubenswrapper[4910]: E0105 23:26:19.702994 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a0e9ef7374f1bd2d5f92b2f62ba2f4b630ee553ebbe4f5c8b1de8c0a545a91c is running failed: container process not found" containerID="9a0e9ef7374f1bd2d5f92b2f62ba2f4b630ee553ebbe4f5c8b1de8c0a545a91c" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 05 23:26:19 crc kubenswrapper[4910]: E0105 23:26:19.703781 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a0e9ef7374f1bd2d5f92b2f62ba2f4b630ee553ebbe4f5c8b1de8c0a545a91c is running failed: container process not found" containerID="9a0e9ef7374f1bd2d5f92b2f62ba2f4b630ee553ebbe4f5c8b1de8c0a545a91c" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 05 23:26:19 crc kubenswrapper[4910]: E0105 23:26:19.703840 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a0e9ef7374f1bd2d5f92b2f62ba2f4b630ee553ebbe4f5c8b1de8c0a545a91c is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="55cf337c-c5d7-48cb-a18f-8c926b8c77e1" containerName="nova-cell1-conductor-conductor" Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 
23:26:19.704078 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 05 23:26:19 crc kubenswrapper[4910]: W0105 23:26:19.712221 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4ff4c84_7f6f_4bf8_a602_bbe609bf14c3.slice/crio-38cfb3c6f4e2d2d09c0116f08588c5089e22e53a2a5fdbcf70a92663b68f2641 WatchSource:0}: Error finding container 38cfb3c6f4e2d2d09c0116f08588c5089e22e53a2a5fdbcf70a92663b68f2641: Status 404 returned error can't find the container with id 38cfb3c6f4e2d2d09c0116f08588c5089e22e53a2a5fdbcf70a92663b68f2641 Jan 05 23:26:19 crc kubenswrapper[4910]: I0105 23:26:19.814910 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 05 23:26:19 crc kubenswrapper[4910]: W0105 23:26:19.819554 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6f7b35f_ff01_4fa9_b914_7735a6bf716e.slice/crio-1135abf52c98f33df9cc3a5ffd7813342036640992c8b4390bb42ca18b01c376 WatchSource:0}: Error finding container 1135abf52c98f33df9cc3a5ffd7813342036640992c8b4390bb42ca18b01c376: Status 404 returned error can't find the container with id 1135abf52c98f33df9cc3a5ffd7813342036640992c8b4390bb42ca18b01c376 Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.096215 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.226930 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxlkt\" (UniqueName: \"kubernetes.io/projected/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-kube-api-access-kxlkt\") pod \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\" (UID: \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\") " Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.227045 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-config-data\") pod \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\" (UID: \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\") " Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.227104 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-combined-ca-bundle\") pod \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\" (UID: \"55cf337c-c5d7-48cb-a18f-8c926b8c77e1\") " Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.237332 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-kube-api-access-kxlkt" (OuterVolumeSpecName: "kube-api-access-kxlkt") pod "55cf337c-c5d7-48cb-a18f-8c926b8c77e1" (UID: "55cf337c-c5d7-48cb-a18f-8c926b8c77e1"). InnerVolumeSpecName "kube-api-access-kxlkt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.250986 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55cf337c-c5d7-48cb-a18f-8c926b8c77e1" (UID: "55cf337c-c5d7-48cb-a18f-8c926b8c77e1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.253964 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-config-data" (OuterVolumeSpecName: "config-data") pod "55cf337c-c5d7-48cb-a18f-8c926b8c77e1" (UID: "55cf337c-c5d7-48cb-a18f-8c926b8c77e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.329632 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxlkt\" (UniqueName: \"kubernetes.io/projected/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-kube-api-access-kxlkt\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.329669 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.329679 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55cf337c-c5d7-48cb-a18f-8c926b8c77e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:20 crc kubenswrapper[4910]: E0105 23:26:20.343955 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 23:26:20 crc kubenswrapper[4910]: E0105 23:26:20.346023 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 23:26:20 crc kubenswrapper[4910]: E0105 23:26:20.347572 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 05 23:26:20 crc kubenswrapper[4910]: E0105 23:26:20.347665 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="776877a4-7fc0-449a-add4-ffe7357c90e2" containerName="nova-scheduler-scheduler" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.687477 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3","Type":"ContainerStarted","Data":"a94fc98fd11204a387414564471340d352ee30c871169ba96c7a45f258284826"} Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.688061 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3","Type":"ContainerStarted","Data":"2305648285e0dbb7d815714fefafa577ae49f676b796ca42cf163a5897c3d9a7"} Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.688083 4910 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/nova-api-0" event={"ID":"b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3","Type":"ContainerStarted","Data":"38cfb3c6f4e2d2d09c0116f08588c5089e22e53a2a5fdbcf70a92663b68f2641"} Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.692887 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d6f7b35f-ff01-4fa9-b914-7735a6bf716e","Type":"ContainerStarted","Data":"3b6834be0e96bb67a54bf45061cdb36fcce4b74b64cc07816fbb2ad84909e6db"} Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.692949 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d6f7b35f-ff01-4fa9-b914-7735a6bf716e","Type":"ContainerStarted","Data":"63fbf75aa731dc9910f907570eba55aecf18bf0f8a616eab4e0985ea4ac911b6"} Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.692969 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d6f7b35f-ff01-4fa9-b914-7735a6bf716e","Type":"ContainerStarted","Data":"1135abf52c98f33df9cc3a5ffd7813342036640992c8b4390bb42ca18b01c376"} Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.695618 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"55cf337c-c5d7-48cb-a18f-8c926b8c77e1","Type":"ContainerDied","Data":"d0df26d023660806b38b53486b449157eaf4c079ed7440f804e6b4f67dfb34b2"} Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.695689 4910 scope.go:117] "RemoveContainer" containerID="9a0e9ef7374f1bd2d5f92b2f62ba2f4b630ee553ebbe4f5c8b1de8c0a545a91c" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.695646 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.722243 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.722222251 podStartE2EDuration="2.722222251s" podCreationTimestamp="2026-01-05 23:26:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:26:20.708093321 +0000 UTC m=+5712.285590991" watchObservedRunningTime="2026-01-05 23:26:20.722222251 +0000 UTC m=+5712.299719921" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.745039 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.7450163659999998 podStartE2EDuration="2.745016366s" podCreationTimestamp="2026-01-05 23:26:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:26:20.735836848 +0000 UTC m=+5712.313334528" watchObservedRunningTime="2026-01-05 23:26:20.745016366 +0000 UTC m=+5712.322514036" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.815028 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02bc116b-3352-4eb9-9c44-1283f355e711" path="/var/lib/kubelet/pods/02bc116b-3352-4eb9-9c44-1283f355e711/volumes" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.816011 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee48c3fa-ab41-4e07-ba38-f7195ac868e0" path="/var/lib/kubelet/pods/ee48c3fa-ab41-4e07-ba38-f7195ac868e0/volumes" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.831785 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 05 23:26:20 
crc kubenswrapper[4910]: I0105 23:26:20.861046 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.870266 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 05 23:26:20 crc kubenswrapper[4910]: E0105 23:26:20.870790 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55cf337c-c5d7-48cb-a18f-8c926b8c77e1" containerName="nova-cell1-conductor-conductor" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.870815 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="55cf337c-c5d7-48cb-a18f-8c926b8c77e1" containerName="nova-cell1-conductor-conductor" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.871027 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="55cf337c-c5d7-48cb-a18f-8c926b8c77e1" containerName="nova-cell1-conductor-conductor" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.872905 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.879942 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.880679 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.942522 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25abe292-c3e5-41cf-956e-c69b86c10ba1-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"25abe292-c3e5-41cf-956e-c69b86c10ba1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.942593 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4kpk\" (UniqueName: \"kubernetes.io/projected/25abe292-c3e5-41cf-956e-c69b86c10ba1-kube-api-access-s4kpk\") pod \"nova-cell1-conductor-0\" (UID: \"25abe292-c3e5-41cf-956e-c69b86c10ba1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:20 crc kubenswrapper[4910]: I0105 23:26:20.942707 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25abe292-c3e5-41cf-956e-c69b86c10ba1-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"25abe292-c3e5-41cf-956e-c69b86c10ba1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:21 crc kubenswrapper[4910]: I0105 23:26:21.025475 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:21 crc kubenswrapper[4910]: I0105 23:26:21.044353 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25abe292-c3e5-41cf-956e-c69b86c10ba1-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"25abe292-c3e5-41cf-956e-c69b86c10ba1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:21 crc kubenswrapper[4910]: I0105 23:26:21.044444 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25abe292-c3e5-41cf-956e-c69b86c10ba1-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"25abe292-c3e5-41cf-956e-c69b86c10ba1\") " 
pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:21 crc kubenswrapper[4910]: I0105 23:26:21.044469 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4kpk\" (UniqueName: \"kubernetes.io/projected/25abe292-c3e5-41cf-956e-c69b86c10ba1-kube-api-access-s4kpk\") pod \"nova-cell1-conductor-0\" (UID: \"25abe292-c3e5-41cf-956e-c69b86c10ba1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:21 crc kubenswrapper[4910]: I0105 23:26:21.052696 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25abe292-c3e5-41cf-956e-c69b86c10ba1-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"25abe292-c3e5-41cf-956e-c69b86c10ba1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:21 crc kubenswrapper[4910]: I0105 23:26:21.055074 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25abe292-c3e5-41cf-956e-c69b86c10ba1-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"25abe292-c3e5-41cf-956e-c69b86c10ba1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:21 crc kubenswrapper[4910]: I0105 23:26:21.073786 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4kpk\" (UniqueName: \"kubernetes.io/projected/25abe292-c3e5-41cf-956e-c69b86c10ba1-kube-api-access-s4kpk\") pod \"nova-cell1-conductor-0\" (UID: \"25abe292-c3e5-41cf-956e-c69b86c10ba1\") " pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:21 crc kubenswrapper[4910]: I0105 23:26:21.197617 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:21 crc kubenswrapper[4910]: I0105 23:26:21.744780 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 05 23:26:22 crc kubenswrapper[4910]: I0105 23:26:22.734078 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55cf337c-c5d7-48cb-a18f-8c926b8c77e1" path="/var/lib/kubelet/pods/55cf337c-c5d7-48cb-a18f-8c926b8c77e1/volumes" Jan 05 23:26:22 crc kubenswrapper[4910]: I0105 23:26:22.734942 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"25abe292-c3e5-41cf-956e-c69b86c10ba1","Type":"ContainerStarted","Data":"254442b64711ce67f7a1640721a46a066209773e6496359dbca4a70f7902a670"} Jan 05 23:26:22 crc kubenswrapper[4910]: I0105 23:26:22.734987 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:22 crc kubenswrapper[4910]: I0105 23:26:22.734999 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"25abe292-c3e5-41cf-956e-c69b86c10ba1","Type":"ContainerStarted","Data":"ce7154030535b7c27ecb640bb0156b0449aeaac3e24da2bfeeee8b6fd5023d62"} Jan 05 23:26:22 crc kubenswrapper[4910]: I0105 23:26:22.754447 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.7544217140000002 podStartE2EDuration="2.754421714s" podCreationTimestamp="2026-01-05 23:26:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:26:22.749321577 +0000 UTC m=+5714.326819247" watchObservedRunningTime="2026-01-05 23:26:22.754421714 +0000 UTC m=+5714.331919384" Jan 05 23:26:23 crc kubenswrapper[4910]: I0105 
23:26:23.080086 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.149748 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.150029 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.465571 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.568228 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4hvnv\" (UniqueName: \"kubernetes.io/projected/776877a4-7fc0-449a-add4-ffe7357c90e2-kube-api-access-4hvnv\") pod \"776877a4-7fc0-449a-add4-ffe7357c90e2\" (UID: \"776877a4-7fc0-449a-add4-ffe7357c90e2\") " Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.568281 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-combined-ca-bundle\") pod \"776877a4-7fc0-449a-add4-ffe7357c90e2\" (UID: \"776877a4-7fc0-449a-add4-ffe7357c90e2\") " Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.568324 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-config-data\") pod \"776877a4-7fc0-449a-add4-ffe7357c90e2\" (UID: \"776877a4-7fc0-449a-add4-ffe7357c90e2\") " Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.578058 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/776877a4-7fc0-449a-add4-ffe7357c90e2-kube-api-access-4hvnv" (OuterVolumeSpecName: "kube-api-access-4hvnv") pod "776877a4-7fc0-449a-add4-ffe7357c90e2" (UID: "776877a4-7fc0-449a-add4-ffe7357c90e2"). InnerVolumeSpecName "kube-api-access-4hvnv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:26:24 crc kubenswrapper[4910]: E0105 23:26:24.595517 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-combined-ca-bundle podName:776877a4-7fc0-449a-add4-ffe7357c90e2 nodeName:}" failed. No retries permitted until 2026-01-05 23:26:25.095475233 +0000 UTC m=+5716.672972903 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-combined-ca-bundle") pod "776877a4-7fc0-449a-add4-ffe7357c90e2" (UID: "776877a4-7fc0-449a-add4-ffe7357c90e2") : error deleting /var/lib/kubelet/pods/776877a4-7fc0-449a-add4-ffe7357c90e2/volume-subpaths: remove /var/lib/kubelet/pods/776877a4-7fc0-449a-add4-ffe7357c90e2/volume-subpaths: no such file or directory Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.599574 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-config-data" (OuterVolumeSpecName: "config-data") pod "776877a4-7fc0-449a-add4-ffe7357c90e2" (UID: "776877a4-7fc0-449a-add4-ffe7357c90e2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.670802 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4hvnv\" (UniqueName: \"kubernetes.io/projected/776877a4-7fc0-449a-add4-ffe7357c90e2-kube-api-access-4hvnv\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.671029 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.752011 4910 generic.go:334] "Generic (PLEG): container finished" podID="776877a4-7fc0-449a-add4-ffe7357c90e2" containerID="d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9" exitCode=0 Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.752144 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.752708 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"776877a4-7fc0-449a-add4-ffe7357c90e2","Type":"ContainerDied","Data":"d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9"} Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.752759 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"776877a4-7fc0-449a-add4-ffe7357c90e2","Type":"ContainerDied","Data":"3ebc72a4cd08b688dd86c725f996a755cced7f7528fa061ba4e5c5f8b5c3e2f7"} Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.752792 4910 scope.go:117] "RemoveContainer" containerID="d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9" Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.786358 4910 scope.go:117] "RemoveContainer" containerID="d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9" Jan 05 23:26:24 crc kubenswrapper[4910]: E0105 23:26:24.787041 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9\": container with ID starting with d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9 not found: ID does not exist" containerID="d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9" Jan 05 23:26:24 crc kubenswrapper[4910]: I0105 23:26:24.787099 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9"} err="failed to get container status \"d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9\": rpc error: code = NotFound desc = could not find container \"d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9\": container with ID starting with d242f96dae00210855e1800d77cb6e0950b1c100e7d8a4ad68caec5aa3ed7fd9 not found: ID does not exist" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.179345 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-combined-ca-bundle\") pod \"776877a4-7fc0-449a-add4-ffe7357c90e2\" (UID: \"776877a4-7fc0-449a-add4-ffe7357c90e2\") " Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.187472 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "776877a4-7fc0-449a-add4-ffe7357c90e2" (UID: "776877a4-7fc0-449a-add4-ffe7357c90e2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.282083 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776877a4-7fc0-449a-add4-ffe7357c90e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.404558 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.417049 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.428812 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:26:25 crc kubenswrapper[4910]: E0105 23:26:25.429303 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="776877a4-7fc0-449a-add4-ffe7357c90e2" containerName="nova-scheduler-scheduler" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.429325 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="776877a4-7fc0-449a-add4-ffe7357c90e2" containerName="nova-scheduler-scheduler" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.429565 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="776877a4-7fc0-449a-add4-ffe7357c90e2" containerName="nova-scheduler-scheduler" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.430736 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.435595 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.447465 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.596613 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daf56c5f-975d-4fd6-bf96-852ddc0b476b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"daf56c5f-975d-4fd6-bf96-852ddc0b476b\") " pod="openstack/nova-scheduler-0" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.597178 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daf56c5f-975d-4fd6-bf96-852ddc0b476b-config-data\") pod \"nova-scheduler-0\" (UID: \"daf56c5f-975d-4fd6-bf96-852ddc0b476b\") " pod="openstack/nova-scheduler-0" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.597366 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dptg\" (UniqueName: \"kubernetes.io/projected/daf56c5f-975d-4fd6-bf96-852ddc0b476b-kube-api-access-4dptg\") pod \"nova-scheduler-0\" (UID: \"daf56c5f-975d-4fd6-bf96-852ddc0b476b\") " pod="openstack/nova-scheduler-0" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.699581 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dptg\" (UniqueName: 
\"kubernetes.io/projected/daf56c5f-975d-4fd6-bf96-852ddc0b476b-kube-api-access-4dptg\") pod \"nova-scheduler-0\" (UID: \"daf56c5f-975d-4fd6-bf96-852ddc0b476b\") " pod="openstack/nova-scheduler-0" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.699667 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daf56c5f-975d-4fd6-bf96-852ddc0b476b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"daf56c5f-975d-4fd6-bf96-852ddc0b476b\") " pod="openstack/nova-scheduler-0" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.699729 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daf56c5f-975d-4fd6-bf96-852ddc0b476b-config-data\") pod \"nova-scheduler-0\" (UID: \"daf56c5f-975d-4fd6-bf96-852ddc0b476b\") " pod="openstack/nova-scheduler-0" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.704832 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daf56c5f-975d-4fd6-bf96-852ddc0b476b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"daf56c5f-975d-4fd6-bf96-852ddc0b476b\") " pod="openstack/nova-scheduler-0" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.708737 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daf56c5f-975d-4fd6-bf96-852ddc0b476b-config-data\") pod \"nova-scheduler-0\" (UID: \"daf56c5f-975d-4fd6-bf96-852ddc0b476b\") " pod="openstack/nova-scheduler-0" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.732398 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dptg\" (UniqueName: \"kubernetes.io/projected/daf56c5f-975d-4fd6-bf96-852ddc0b476b-kube-api-access-4dptg\") pod \"nova-scheduler-0\" (UID: \"daf56c5f-975d-4fd6-bf96-852ddc0b476b\") " pod="openstack/nova-scheduler-0" Jan 05 23:26:25 crc kubenswrapper[4910]: I0105 23:26:25.781397 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 05 23:26:26 crc kubenswrapper[4910]: I0105 23:26:26.021334 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:26 crc kubenswrapper[4910]: I0105 23:26:26.038038 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:26 crc kubenswrapper[4910]: I0105 23:26:26.246499 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 05 23:26:26 crc kubenswrapper[4910]: W0105 23:26:26.271326 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddaf56c5f_975d_4fd6_bf96_852ddc0b476b.slice/crio-fb85e9a31b1fcc48fad66a05269938e840f24a5f3ae385ab6ae5515019d1ab61 WatchSource:0}: Error finding container fb85e9a31b1fcc48fad66a05269938e840f24a5f3ae385ab6ae5515019d1ab61: Status 404 returned error can't find the container with id fb85e9a31b1fcc48fad66a05269938e840f24a5f3ae385ab6ae5515019d1ab61 Jan 05 23:26:26 crc kubenswrapper[4910]: I0105 23:26:26.277054 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 05 23:26:26 crc kubenswrapper[4910]: I0105 23:26:26.730840 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="776877a4-7fc0-449a-add4-ffe7357c90e2" path="/var/lib/kubelet/pods/776877a4-7fc0-449a-add4-ffe7357c90e2/volumes" Jan 05 23:26:26 crc kubenswrapper[4910]: I0105 23:26:26.780445 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"daf56c5f-975d-4fd6-bf96-852ddc0b476b","Type":"ContainerStarted","Data":"ff9ce8b25664139d5144b30730229549bb47045c25905052919c04418fd63db9"} Jan 05 23:26:26 crc kubenswrapper[4910]: I0105 23:26:26.781725 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"daf56c5f-975d-4fd6-bf96-852ddc0b476b","Type":"ContainerStarted","Data":"fb85e9a31b1fcc48fad66a05269938e840f24a5f3ae385ab6ae5515019d1ab61"} Jan 05 23:26:26 crc kubenswrapper[4910]: I0105 23:26:26.790843 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 05 23:26:26 crc kubenswrapper[4910]: I0105 23:26:26.797356 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.7973322980000002 podStartE2EDuration="1.797332298s" podCreationTimestamp="2026-01-05 23:26:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:26:26.796076447 +0000 UTC m=+5718.373574117" watchObservedRunningTime="2026-01-05 23:26:26.797332298 +0000 UTC m=+5718.374829968" Jan 05 23:26:29 crc kubenswrapper[4910]: I0105 23:26:29.141793 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 05 23:26:29 crc kubenswrapper[4910]: I0105 23:26:29.141903 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 05 23:26:29 crc kubenswrapper[4910]: I0105 23:26:29.149704 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 05 23:26:29 crc kubenswrapper[4910]: I0105 23:26:29.149778 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" 
Jan 05 23:26:30 crc kubenswrapper[4910]: I0105 23:26:30.307547 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d6f7b35f-ff01-4fa9-b914-7735a6bf716e" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.87:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 23:26:30 crc kubenswrapper[4910]: I0105 23:26:30.307595 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.86:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 23:26:30 crc kubenswrapper[4910]: I0105 23:26:30.307615 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.86:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 23:26:30 crc kubenswrapper[4910]: I0105 23:26:30.307655 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d6f7b35f-ff01-4fa9-b914-7735a6bf716e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.87:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 05 23:26:30 crc kubenswrapper[4910]: I0105 23:26:30.781979 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.562673 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.590211 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.590561 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.593945 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.681428 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-scripts\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.681507 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.681568 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.681613 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zf77\" (UniqueName: \"kubernetes.io/projected/30233c9b-ade9-41cf-ab01-b40994eaeb8d-kube-api-access-4zf77\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.681737 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-config-data\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.681820 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/30233c9b-ade9-41cf-ab01-b40994eaeb8d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.784540 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-config-data\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.784669 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/30233c9b-ade9-41cf-ab01-b40994eaeb8d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.784854 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-scripts\") pod \"cinder-scheduler-0\" 
(UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.784885 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.784959 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.784998 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zf77\" (UniqueName: \"kubernetes.io/projected/30233c9b-ade9-41cf-ab01-b40994eaeb8d-kube-api-access-4zf77\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.786113 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/30233c9b-ade9-41cf-ab01-b40994eaeb8d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.791757 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.792026 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-scripts\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.796605 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-config-data\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.803664 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.817051 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zf77\" (UniqueName: \"kubernetes.io/projected/30233c9b-ade9-41cf-ab01-b40994eaeb8d-kube-api-access-4zf77\") pod \"cinder-scheduler-0\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:33 crc kubenswrapper[4910]: I0105 23:26:33.921598 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 23:26:34 crc kubenswrapper[4910]: I0105 23:26:34.494665 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 23:26:34 crc kubenswrapper[4910]: I0105 23:26:34.910732 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"30233c9b-ade9-41cf-ab01-b40994eaeb8d","Type":"ContainerStarted","Data":"056a5b397058e128a9fc0063afdafc98870f47ed3d8f21e60b30e438c01ea2fe"} Jan 05 23:26:34 crc kubenswrapper[4910]: I0105 23:26:34.960550 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 05 23:26:34 crc kubenswrapper[4910]: I0105 23:26:34.960797 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="cd8216dd-10b4-41f8-bc8f-ef7437020264" containerName="cinder-api-log" containerID="cri-o://1416900f8247c2d8e0d8ae1e0e6034eb2594d0fd195bc3b5000a43dc46be1439" gracePeriod=30 Jan 05 23:26:34 crc kubenswrapper[4910]: I0105 23:26:34.961071 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="cd8216dd-10b4-41f8-bc8f-ef7437020264" containerName="cinder-api" containerID="cri-o://35e9ed7907bc2a1dee5ec6488324b2493fec8f5acdb20739fa960009854624d1" gracePeriod=30 Jan 05 23:26:35 crc kubenswrapper[4910]: I0105 23:26:35.783888 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 05 23:26:35 crc kubenswrapper[4910]: I0105 23:26:35.904866 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 05 23:26:35 crc kubenswrapper[4910]: I0105 23:26:35.932440 4910 generic.go:334] "Generic (PLEG): container finished" podID="cd8216dd-10b4-41f8-bc8f-ef7437020264" containerID="1416900f8247c2d8e0d8ae1e0e6034eb2594d0fd195bc3b5000a43dc46be1439" exitCode=143 Jan 05 23:26:35 crc kubenswrapper[4910]: I0105 23:26:35.932544 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd8216dd-10b4-41f8-bc8f-ef7437020264","Type":"ContainerDied","Data":"1416900f8247c2d8e0d8ae1e0e6034eb2594d0fd195bc3b5000a43dc46be1439"} Jan 05 23:26:35 crc kubenswrapper[4910]: I0105 23:26:35.944054 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"30233c9b-ade9-41cf-ab01-b40994eaeb8d","Type":"ContainerStarted","Data":"16b3e2f84d6e576749c4d388bdfe47c72e240020cde87f3798b3c95cd156b736"} Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.028298 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.036898 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"] Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.038712 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.053072 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.068285 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154398 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154443 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154462 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154483 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-sys\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154503 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154541 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154560 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154577 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtjl2\" (UniqueName: \"kubernetes.io/projected/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-kube-api-access-qtjl2\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 
23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154599 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-dev\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154637 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154666 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-run\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154697 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154724 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154743 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154770 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.154787 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256554 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256597 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256649 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256667 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256682 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256700 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-sys\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256721 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256744 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256763 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256782 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtjl2\" (UniqueName: \"kubernetes.io/projected/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-kube-api-access-qtjl2\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256801 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-dev\") pod 
\"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256838 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256866 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-run\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256891 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256918 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.256934 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.257087 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.257135 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.257379 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.259941 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-run\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.260020 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.262277 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.262987 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-sys\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.263257 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-dev\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.263816 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.263925 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.264609 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.266919 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.271623 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.272302 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 
23:26:36.274645 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.293583 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtjl2\" (UniqueName: \"kubernetes.io/projected/9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe-kube-api-access-qtjl2\") pod \"cinder-volume-volume1-0\" (UID: \"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe\") " pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.391496 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.692685 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.703068 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.706069 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.712104 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.868012 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-config-data-custom\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.868063 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.868089 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.868229 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-lib-modules\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.868453 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-run\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.868516 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-ceph\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.868588 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-scripts\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.868606 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-etc-nvme\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.868914 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbp7t\" (UniqueName: \"kubernetes.io/projected/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-kube-api-access-pbp7t\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.869026 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.869173 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-config-data\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.869219 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-sys\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.869256 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.869274 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.869318 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-dev\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " 
pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.869433 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.957234 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"30233c9b-ade9-41cf-ab01-b40994eaeb8d","Type":"ContainerStarted","Data":"ab869ac5831a1316d67724a799045dc1a6522ced599238fd2ac236450bfd298e"} Jan 05 23:26:36 crc kubenswrapper[4910]: W0105 23:26:36.962940 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9c5acd7b_7c52_4ff4_b9f4_1f80bb1e4fbe.slice/crio-68bc1508f0d4a15867d8231df305d524316446f4046252f2c6eb6e17d3abd48f WatchSource:0}: Error finding container 68bc1508f0d4a15867d8231df305d524316446f4046252f2c6eb6e17d3abd48f: Status 404 returned error can't find the container with id 68bc1508f0d4a15867d8231df305d524316446f4046252f2c6eb6e17d3abd48f Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.967400 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971036 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971091 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971162 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-lib-modules\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971210 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-run\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971243 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-ceph\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971277 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-scripts\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc 
kubenswrapper[4910]: I0105 23:26:36.971298 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-etc-nvme\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971335 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbp7t\" (UniqueName: \"kubernetes.io/projected/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-kube-api-access-pbp7t\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971332 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971373 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971407 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-etc-nvme\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971437 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-config-data\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971466 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-sys\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971491 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971512 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971540 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-dev\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc 
kubenswrapper[4910]: I0105 23:26:36.971574 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.971600 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-config-data-custom\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.972195 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-lib-modules\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.972254 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-run\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.972279 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-sys\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.972549 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.974015 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.974078 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.974105 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-dev\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.974150 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.981075 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.984104 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-config-data-custom\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.985090 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.994032 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-config-data\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.994936 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-scripts\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:36 crc kubenswrapper[4910]: I0105 23:26:36.996233 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-ceph\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:37 crc kubenswrapper[4910]: I0105 23:26:37.000463 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbp7t\" (UniqueName: \"kubernetes.io/projected/4cb60132-8c1f-4d0f-9582-32e551e2f4f9-kube-api-access-pbp7t\") pod \"cinder-backup-0\" (UID: \"4cb60132-8c1f-4d0f-9582-32e551e2f4f9\") " pod="openstack/cinder-backup-0" Jan 05 23:26:37 crc kubenswrapper[4910]: I0105 23:26:37.001337 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.001307928 podStartE2EDuration="4.001307928s" podCreationTimestamp="2026-01-05 23:26:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:26:36.98967943 +0000 UTC m=+5728.567177100" watchObservedRunningTime="2026-01-05 23:26:37.001307928 +0000 UTC m=+5728.578805618" Jan 05 23:26:37 crc kubenswrapper[4910]: I0105 23:26:37.032491 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-backup-0" Jan 05 23:26:37 crc kubenswrapper[4910]: I0105 23:26:37.648050 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Jan 05 23:26:37 crc kubenswrapper[4910]: W0105 23:26:37.669808 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4cb60132_8c1f_4d0f_9582_32e551e2f4f9.slice/crio-51a10ed2abbda50920adc0da16c4066a90b92b3b38b4e5e71d68f8ce86cb7014 WatchSource:0}: Error finding container 51a10ed2abbda50920adc0da16c4066a90b92b3b38b4e5e71d68f8ce86cb7014: Status 404 returned error can't find the container with id 51a10ed2abbda50920adc0da16c4066a90b92b3b38b4e5e71d68f8ce86cb7014 Jan 05 23:26:37 crc kubenswrapper[4910]: I0105 23:26:37.980014 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"4cb60132-8c1f-4d0f-9582-32e551e2f4f9","Type":"ContainerStarted","Data":"51a10ed2abbda50920adc0da16c4066a90b92b3b38b4e5e71d68f8ce86cb7014"} Jan 05 23:26:37 crc kubenswrapper[4910]: I0105 23:26:37.988267 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe","Type":"ContainerStarted","Data":"68bc1508f0d4a15867d8231df305d524316446f4046252f2c6eb6e17d3abd48f"} Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.142236 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="cd8216dd-10b4-41f8-bc8f-ef7437020264" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.1.83:8776/healthcheck\": read tcp 10.217.0.2:57436->10.217.1.83:8776: read: connection reset by peer" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.690809 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.816689 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cd8216dd-10b4-41f8-bc8f-ef7437020264-etc-machine-id\") pod \"cd8216dd-10b4-41f8-bc8f-ef7437020264\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.816740 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-scripts\") pod \"cd8216dd-10b4-41f8-bc8f-ef7437020264\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.816782 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-config-data-custom\") pod \"cd8216dd-10b4-41f8-bc8f-ef7437020264\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.816880 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-combined-ca-bundle\") pod \"cd8216dd-10b4-41f8-bc8f-ef7437020264\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.816942 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-config-data\") pod \"cd8216dd-10b4-41f8-bc8f-ef7437020264\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.816979 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sxsgh\" (UniqueName: \"kubernetes.io/projected/cd8216dd-10b4-41f8-bc8f-ef7437020264-kube-api-access-sxsgh\") pod \"cd8216dd-10b4-41f8-bc8f-ef7437020264\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.817076 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd8216dd-10b4-41f8-bc8f-ef7437020264-logs\") pod \"cd8216dd-10b4-41f8-bc8f-ef7437020264\" (UID: \"cd8216dd-10b4-41f8-bc8f-ef7437020264\") " Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.821061 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd8216dd-10b4-41f8-bc8f-ef7437020264-logs" (OuterVolumeSpecName: "logs") pod "cd8216dd-10b4-41f8-bc8f-ef7437020264" (UID: "cd8216dd-10b4-41f8-bc8f-ef7437020264"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.822762 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd8216dd-10b4-41f8-bc8f-ef7437020264-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "cd8216dd-10b4-41f8-bc8f-ef7437020264" (UID: "cd8216dd-10b4-41f8-bc8f-ef7437020264"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.827184 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-scripts" (OuterVolumeSpecName: "scripts") pod "cd8216dd-10b4-41f8-bc8f-ef7437020264" (UID: "cd8216dd-10b4-41f8-bc8f-ef7437020264"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.830249 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd8216dd-10b4-41f8-bc8f-ef7437020264-kube-api-access-sxsgh" (OuterVolumeSpecName: "kube-api-access-sxsgh") pod "cd8216dd-10b4-41f8-bc8f-ef7437020264" (UID: "cd8216dd-10b4-41f8-bc8f-ef7437020264"). InnerVolumeSpecName "kube-api-access-sxsgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.842571 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "cd8216dd-10b4-41f8-bc8f-ef7437020264" (UID: "cd8216dd-10b4-41f8-bc8f-ef7437020264"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.871091 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cd8216dd-10b4-41f8-bc8f-ef7437020264" (UID: "cd8216dd-10b4-41f8-bc8f-ef7437020264"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.901665 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-config-data" (OuterVolumeSpecName: "config-data") pod "cd8216dd-10b4-41f8-bc8f-ef7437020264" (UID: "cd8216dd-10b4-41f8-bc8f-ef7437020264"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.919951 4910 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cd8216dd-10b4-41f8-bc8f-ef7437020264-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.920021 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.920031 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.920040 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.920048 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd8216dd-10b4-41f8-bc8f-ef7437020264-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.920075 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sxsgh\" (UniqueName: \"kubernetes.io/projected/cd8216dd-10b4-41f8-bc8f-ef7437020264-kube-api-access-sxsgh\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.920086 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd8216dd-10b4-41f8-bc8f-ef7437020264-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.925215 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 05 23:26:38 crc kubenswrapper[4910]: I0105 23:26:38.997398 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"4cb60132-8c1f-4d0f-9582-32e551e2f4f9","Type":"ContainerStarted","Data":"c9bd6a01558f969cc33d472dd202c8faca023d1969dd0f82928826f5baa40a08"} Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.000000 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe","Type":"ContainerStarted","Data":"8cf137b0101986aa19e97c1c1fa86c14313aa35bdf89c7d538aa08b5dc1f38b1"} Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.000025 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe","Type":"ContainerStarted","Data":"d06fe2e74273ae4730ce38405aeaef0bceacfa1a163a634749bb54ab894b3d5f"} Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.010572 4910 generic.go:334] "Generic (PLEG): container finished" podID="cd8216dd-10b4-41f8-bc8f-ef7437020264" containerID="35e9ed7907bc2a1dee5ec6488324b2493fec8f5acdb20739fa960009854624d1" exitCode=0 Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.010909 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.011225 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd8216dd-10b4-41f8-bc8f-ef7437020264","Type":"ContainerDied","Data":"35e9ed7907bc2a1dee5ec6488324b2493fec8f5acdb20739fa960009854624d1"} Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.011265 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd8216dd-10b4-41f8-bc8f-ef7437020264","Type":"ContainerDied","Data":"5878364884b3c4fec7564d8a1ed7b8e6122aed10fe0944baebfeddc4265715dc"} Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.011295 4910 scope.go:117] "RemoveContainer" containerID="35e9ed7907bc2a1dee5ec6488324b2493fec8f5acdb20739fa960009854624d1" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.044967 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=3.200617513 podStartE2EDuration="4.044949495s" podCreationTimestamp="2026-01-05 23:26:35 +0000 UTC" firstStartedPulling="2026-01-05 23:26:36.967206844 +0000 UTC m=+5728.544704514" lastFinishedPulling="2026-01-05 23:26:37.811538816 +0000 UTC m=+5729.389036496" observedRunningTime="2026-01-05 23:26:39.029622246 +0000 UTC m=+5730.607119916" watchObservedRunningTime="2026-01-05 23:26:39.044949495 +0000 UTC m=+5730.622447165" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.057078 4910 scope.go:117] "RemoveContainer" containerID="1416900f8247c2d8e0d8ae1e0e6034eb2594d0fd195bc3b5000a43dc46be1439" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.073397 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.085397 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.095107 4910 scope.go:117] "RemoveContainer" containerID="35e9ed7907bc2a1dee5ec6488324b2493fec8f5acdb20739fa960009854624d1" Jan 05 23:26:39 crc kubenswrapper[4910]: E0105 23:26:39.095700 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35e9ed7907bc2a1dee5ec6488324b2493fec8f5acdb20739fa960009854624d1\": container with ID starting with 35e9ed7907bc2a1dee5ec6488324b2493fec8f5acdb20739fa960009854624d1 not found: ID does not exist" containerID="35e9ed7907bc2a1dee5ec6488324b2493fec8f5acdb20739fa960009854624d1" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.095734 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35e9ed7907bc2a1dee5ec6488324b2493fec8f5acdb20739fa960009854624d1"} err="failed to get container status \"35e9ed7907bc2a1dee5ec6488324b2493fec8f5acdb20739fa960009854624d1\": rpc error: code = NotFound desc = could not find container \"35e9ed7907bc2a1dee5ec6488324b2493fec8f5acdb20739fa960009854624d1\": container with ID starting with 35e9ed7907bc2a1dee5ec6488324b2493fec8f5acdb20739fa960009854624d1 not found: ID does not exist" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.095755 4910 scope.go:117] "RemoveContainer" containerID="1416900f8247c2d8e0d8ae1e0e6034eb2594d0fd195bc3b5000a43dc46be1439" Jan 05 23:26:39 crc kubenswrapper[4910]: E0105 23:26:39.095998 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"1416900f8247c2d8e0d8ae1e0e6034eb2594d0fd195bc3b5000a43dc46be1439\": container with ID starting with 1416900f8247c2d8e0d8ae1e0e6034eb2594d0fd195bc3b5000a43dc46be1439 not found: ID does not exist" containerID="1416900f8247c2d8e0d8ae1e0e6034eb2594d0fd195bc3b5000a43dc46be1439" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.096017 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1416900f8247c2d8e0d8ae1e0e6034eb2594d0fd195bc3b5000a43dc46be1439"} err="failed to get container status \"1416900f8247c2d8e0d8ae1e0e6034eb2594d0fd195bc3b5000a43dc46be1439\": rpc error: code = NotFound desc = could not find container \"1416900f8247c2d8e0d8ae1e0e6034eb2594d0fd195bc3b5000a43dc46be1439\": container with ID starting with 1416900f8247c2d8e0d8ae1e0e6034eb2594d0fd195bc3b5000a43dc46be1439 not found: ID does not exist" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.098718 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 05 23:26:39 crc kubenswrapper[4910]: E0105 23:26:39.099273 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd8216dd-10b4-41f8-bc8f-ef7437020264" containerName="cinder-api" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.099286 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd8216dd-10b4-41f8-bc8f-ef7437020264" containerName="cinder-api" Jan 05 23:26:39 crc kubenswrapper[4910]: E0105 23:26:39.099300 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd8216dd-10b4-41f8-bc8f-ef7437020264" containerName="cinder-api-log" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.099307 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd8216dd-10b4-41f8-bc8f-ef7437020264" containerName="cinder-api-log" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.099495 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd8216dd-10b4-41f8-bc8f-ef7437020264" containerName="cinder-api" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.099510 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd8216dd-10b4-41f8-bc8f-ef7437020264" containerName="cinder-api-log" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.100516 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.103655 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.107584 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.145718 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.145793 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.146732 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.147100 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.149396 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.150972 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.156580 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.158158 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.158672 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.232242 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18130341-5ea1-4803-9b04-4d8ccb122828-config-data\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.232359 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18130341-5ea1-4803-9b04-4d8ccb122828-logs\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.232416 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfk9k\" (UniqueName: \"kubernetes.io/projected/18130341-5ea1-4803-9b04-4d8ccb122828-kube-api-access-mfk9k\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.232489 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/18130341-5ea1-4803-9b04-4d8ccb122828-etc-machine-id\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.232518 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/18130341-5ea1-4803-9b04-4d8ccb122828-config-data-custom\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.232540 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18130341-5ea1-4803-9b04-4d8ccb122828-scripts\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.232585 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18130341-5ea1-4803-9b04-4d8ccb122828-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.334281 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18130341-5ea1-4803-9b04-4d8ccb122828-logs\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.334326 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfk9k\" (UniqueName: \"kubernetes.io/projected/18130341-5ea1-4803-9b04-4d8ccb122828-kube-api-access-mfk9k\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.334381 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/18130341-5ea1-4803-9b04-4d8ccb122828-etc-machine-id\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.334404 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/18130341-5ea1-4803-9b04-4d8ccb122828-config-data-custom\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.334420 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18130341-5ea1-4803-9b04-4d8ccb122828-scripts\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.334446 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18130341-5ea1-4803-9b04-4d8ccb122828-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.334505 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18130341-5ea1-4803-9b04-4d8ccb122828-config-data\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.335235 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" 
(UniqueName: \"kubernetes.io/host-path/18130341-5ea1-4803-9b04-4d8ccb122828-etc-machine-id\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.336050 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18130341-5ea1-4803-9b04-4d8ccb122828-logs\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.341703 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18130341-5ea1-4803-9b04-4d8ccb122828-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.341946 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18130341-5ea1-4803-9b04-4d8ccb122828-scripts\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.342289 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/18130341-5ea1-4803-9b04-4d8ccb122828-config-data-custom\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.343389 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18130341-5ea1-4803-9b04-4d8ccb122828-config-data\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.353871 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfk9k\" (UniqueName: \"kubernetes.io/projected/18130341-5ea1-4803-9b04-4d8ccb122828-kube-api-access-mfk9k\") pod \"cinder-api-0\" (UID: \"18130341-5ea1-4803-9b04-4d8ccb122828\") " pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.418587 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 05 23:26:39 crc kubenswrapper[4910]: I0105 23:26:39.899653 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 05 23:26:39 crc kubenswrapper[4910]: W0105 23:26:39.900192 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18130341_5ea1_4803_9b04_4d8ccb122828.slice/crio-d2049cca0ec32bb2437dd1e871e80e73e3e233369693fde05c518f70d78fa20e WatchSource:0}: Error finding container d2049cca0ec32bb2437dd1e871e80e73e3e233369693fde05c518f70d78fa20e: Status 404 returned error can't find the container with id d2049cca0ec32bb2437dd1e871e80e73e3e233369693fde05c518f70d78fa20e Jan 05 23:26:40 crc kubenswrapper[4910]: I0105 23:26:40.028414 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"4cb60132-8c1f-4d0f-9582-32e551e2f4f9","Type":"ContainerStarted","Data":"ea783f03702d96840581711aa30948e2570080c5644b103d28c4e82d809af810"} Jan 05 23:26:40 crc kubenswrapper[4910]: I0105 23:26:40.033738 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"18130341-5ea1-4803-9b04-4d8ccb122828","Type":"ContainerStarted","Data":"d2049cca0ec32bb2437dd1e871e80e73e3e233369693fde05c518f70d78fa20e"} Jan 05 23:26:40 crc kubenswrapper[4910]: I0105 23:26:40.036760 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 05 23:26:40 crc kubenswrapper[4910]: I0105 23:26:40.076017 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=3.238319354 podStartE2EDuration="4.075996541s" podCreationTimestamp="2026-01-05 23:26:36 +0000 UTC" firstStartedPulling="2026-01-05 23:26:37.672434991 +0000 UTC m=+5729.249932661" lastFinishedPulling="2026-01-05 23:26:38.510112178 +0000 UTC m=+5730.087609848" observedRunningTime="2026-01-05 23:26:40.050631483 +0000 UTC m=+5731.628129193" watchObservedRunningTime="2026-01-05 23:26:40.075996541 +0000 UTC m=+5731.653494201" Jan 05 23:26:40 crc kubenswrapper[4910]: I0105 23:26:40.746592 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd8216dd-10b4-41f8-bc8f-ef7437020264" path="/var/lib/kubelet/pods/cd8216dd-10b4-41f8-bc8f-ef7437020264/volumes" Jan 05 23:26:40 crc kubenswrapper[4910]: I0105 23:26:40.952351 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:26:40 crc kubenswrapper[4910]: I0105 23:26:40.952430 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:26:41 crc kubenswrapper[4910]: I0105 23:26:41.047787 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"18130341-5ea1-4803-9b04-4d8ccb122828","Type":"ContainerStarted","Data":"3e600b6ed1bb32b7b8a006ac16bdeb52cded847228b1c9cc0d7ad7624ba45e11"} Jan 05 23:26:41 crc kubenswrapper[4910]: I0105 23:26:41.392320 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:42 crc kubenswrapper[4910]: I0105 23:26:42.033831 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Jan 05 23:26:42 crc kubenswrapper[4910]: I0105 23:26:42.063262 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"18130341-5ea1-4803-9b04-4d8ccb122828","Type":"ContainerStarted","Data":"116feeb192814d7f73628e5549b1d7cd240d0359db9baff49013b15fde2b4536"} Jan 05 23:26:42 crc kubenswrapper[4910]: I0105 23:26:42.063756 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 05 23:26:42 crc kubenswrapper[4910]: I0105 23:26:42.114032 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.114000459 podStartE2EDuration="3.114000459s" podCreationTimestamp="2026-01-05 23:26:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:26:42.104581536 +0000 UTC m=+5733.682079236" watchObservedRunningTime="2026-01-05 23:26:42.114000459 +0000 UTC m=+5733.691498159" Jan 05 23:26:44 crc kubenswrapper[4910]: I0105 23:26:44.223584 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 05 23:26:44 crc kubenswrapper[4910]: I0105 23:26:44.316492 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 23:26:45 crc kubenswrapper[4910]: I0105 23:26:45.091615 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="30233c9b-ade9-41cf-ab01-b40994eaeb8d" containerName="cinder-scheduler" containerID="cri-o://16b3e2f84d6e576749c4d388bdfe47c72e240020cde87f3798b3c95cd156b736" gracePeriod=30 Jan 05 23:26:45 crc kubenswrapper[4910]: I0105 23:26:45.091689 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="30233c9b-ade9-41cf-ab01-b40994eaeb8d" containerName="probe" containerID="cri-o://ab869ac5831a1316d67724a799045dc1a6522ced599238fd2ac236450bfd298e" gracePeriod=30 Jan 05 23:26:46 crc kubenswrapper[4910]: I0105 23:26:46.115385 4910 generic.go:334] "Generic (PLEG): container finished" podID="30233c9b-ade9-41cf-ab01-b40994eaeb8d" containerID="ab869ac5831a1316d67724a799045dc1a6522ced599238fd2ac236450bfd298e" exitCode=0 Jan 05 23:26:46 crc kubenswrapper[4910]: I0105 23:26:46.115436 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"30233c9b-ade9-41cf-ab01-b40994eaeb8d","Type":"ContainerDied","Data":"ab869ac5831a1316d67724a799045dc1a6522ced599238fd2ac236450bfd298e"} Jan 05 23:26:46 crc kubenswrapper[4910]: I0105 23:26:46.646747 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.143232 4910 generic.go:334] "Generic (PLEG): container finished" podID="30233c9b-ade9-41cf-ab01-b40994eaeb8d" containerID="16b3e2f84d6e576749c4d388bdfe47c72e240020cde87f3798b3c95cd156b736" exitCode=0 Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.143320 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"30233c9b-ade9-41cf-ab01-b40994eaeb8d","Type":"ContainerDied","Data":"16b3e2f84d6e576749c4d388bdfe47c72e240020cde87f3798b3c95cd156b736"} Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.346745 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.476814 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.641256 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/30233c9b-ade9-41cf-ab01-b40994eaeb8d-etc-machine-id\") pod \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.641334 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/30233c9b-ade9-41cf-ab01-b40994eaeb8d-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "30233c9b-ade9-41cf-ab01-b40994eaeb8d" (UID: "30233c9b-ade9-41cf-ab01-b40994eaeb8d"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.641411 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zf77\" (UniqueName: \"kubernetes.io/projected/30233c9b-ade9-41cf-ab01-b40994eaeb8d-kube-api-access-4zf77\") pod \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.641599 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-config-data-custom\") pod \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.641754 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-scripts\") pod \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.641849 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-combined-ca-bundle\") pod \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.642037 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-config-data\") pod \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\" (UID: \"30233c9b-ade9-41cf-ab01-b40994eaeb8d\") " Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.642949 4910 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/30233c9b-ade9-41cf-ab01-b40994eaeb8d-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.653276 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-config-data-custom" 
(OuterVolumeSpecName: "config-data-custom") pod "30233c9b-ade9-41cf-ab01-b40994eaeb8d" (UID: "30233c9b-ade9-41cf-ab01-b40994eaeb8d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.680295 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-scripts" (OuterVolumeSpecName: "scripts") pod "30233c9b-ade9-41cf-ab01-b40994eaeb8d" (UID: "30233c9b-ade9-41cf-ab01-b40994eaeb8d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.699436 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30233c9b-ade9-41cf-ab01-b40994eaeb8d-kube-api-access-4zf77" (OuterVolumeSpecName: "kube-api-access-4zf77") pod "30233c9b-ade9-41cf-ab01-b40994eaeb8d" (UID: "30233c9b-ade9-41cf-ab01-b40994eaeb8d"). InnerVolumeSpecName "kube-api-access-4zf77". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.745069 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zf77\" (UniqueName: \"kubernetes.io/projected/30233c9b-ade9-41cf-ab01-b40994eaeb8d-kube-api-access-4zf77\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.745100 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.745109 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.771219 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "30233c9b-ade9-41cf-ab01-b40994eaeb8d" (UID: "30233c9b-ade9-41cf-ab01-b40994eaeb8d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.804344 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-config-data" (OuterVolumeSpecName: "config-data") pod "30233c9b-ade9-41cf-ab01-b40994eaeb8d" (UID: "30233c9b-ade9-41cf-ab01-b40994eaeb8d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.847049 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:47 crc kubenswrapper[4910]: I0105 23:26:47.847081 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30233c9b-ade9-41cf-ab01-b40994eaeb8d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.157650 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"30233c9b-ade9-41cf-ab01-b40994eaeb8d","Type":"ContainerDied","Data":"056a5b397058e128a9fc0063afdafc98870f47ed3d8f21e60b30e438c01ea2fe"} Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.157732 4910 scope.go:117] "RemoveContainer" containerID="ab869ac5831a1316d67724a799045dc1a6522ced599238fd2ac236450bfd298e" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.157732 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.196629 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.197773 4910 scope.go:117] "RemoveContainer" containerID="16b3e2f84d6e576749c4d388bdfe47c72e240020cde87f3798b3c95cd156b736" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.206009 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.234633 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 23:26:48 crc kubenswrapper[4910]: E0105 23:26:48.235453 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30233c9b-ade9-41cf-ab01-b40994eaeb8d" containerName="cinder-scheduler" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.235478 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="30233c9b-ade9-41cf-ab01-b40994eaeb8d" containerName="cinder-scheduler" Jan 05 23:26:48 crc kubenswrapper[4910]: E0105 23:26:48.235507 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30233c9b-ade9-41cf-ab01-b40994eaeb8d" containerName="probe" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.235515 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="30233c9b-ade9-41cf-ab01-b40994eaeb8d" containerName="probe" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.235710 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="30233c9b-ade9-41cf-ab01-b40994eaeb8d" containerName="probe" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.235731 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="30233c9b-ade9-41cf-ab01-b40994eaeb8d" containerName="cinder-scheduler" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.236780 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.241206 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.293380 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.356186 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07d1adea-bd0b-4e0a-a673-b20a56d68a20-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.356253 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9h6ll\" (UniqueName: \"kubernetes.io/projected/07d1adea-bd0b-4e0a-a673-b20a56d68a20-kube-api-access-9h6ll\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.356285 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07d1adea-bd0b-4e0a-a673-b20a56d68a20-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.356333 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07d1adea-bd0b-4e0a-a673-b20a56d68a20-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.356360 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07d1adea-bd0b-4e0a-a673-b20a56d68a20-config-data\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.356414 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07d1adea-bd0b-4e0a-a673-b20a56d68a20-scripts\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.458553 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07d1adea-bd0b-4e0a-a673-b20a56d68a20-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.458628 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9h6ll\" (UniqueName: \"kubernetes.io/projected/07d1adea-bd0b-4e0a-a673-b20a56d68a20-kube-api-access-9h6ll\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.458669 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07d1adea-bd0b-4e0a-a673-b20a56d68a20-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.458731 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07d1adea-bd0b-4e0a-a673-b20a56d68a20-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.458762 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07d1adea-bd0b-4e0a-a673-b20a56d68a20-config-data\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.458827 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07d1adea-bd0b-4e0a-a673-b20a56d68a20-scripts\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.458870 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07d1adea-bd0b-4e0a-a673-b20a56d68a20-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.463734 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07d1adea-bd0b-4e0a-a673-b20a56d68a20-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.469429 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07d1adea-bd0b-4e0a-a673-b20a56d68a20-scripts\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.469979 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07d1adea-bd0b-4e0a-a673-b20a56d68a20-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.470217 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07d1adea-bd0b-4e0a-a673-b20a56d68a20-config-data\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.484039 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9h6ll\" (UniqueName: \"kubernetes.io/projected/07d1adea-bd0b-4e0a-a673-b20a56d68a20-kube-api-access-9h6ll\") pod \"cinder-scheduler-0\" (UID: \"07d1adea-bd0b-4e0a-a673-b20a56d68a20\") " pod="openstack/cinder-scheduler-0" Jan 
05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.611613 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 05 23:26:48 crc kubenswrapper[4910]: I0105 23:26:48.741064 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30233c9b-ade9-41cf-ab01-b40994eaeb8d" path="/var/lib/kubelet/pods/30233c9b-ade9-41cf-ab01-b40994eaeb8d/volumes" Jan 05 23:26:49 crc kubenswrapper[4910]: I0105 23:26:49.106921 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 05 23:26:49 crc kubenswrapper[4910]: I0105 23:26:49.179228 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"07d1adea-bd0b-4e0a-a673-b20a56d68a20","Type":"ContainerStarted","Data":"e135184f9bf5cc93641a4dd024ad116a94cdb96c960a747bc51745f55a4f4210"} Jan 05 23:26:50 crc kubenswrapper[4910]: I0105 23:26:50.219234 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"07d1adea-bd0b-4e0a-a673-b20a56d68a20","Type":"ContainerStarted","Data":"4ee893fd3dc397abe2c855de4f30a28f5c0d53eb6c1a4d5625bb348b75b1b81d"} Jan 05 23:26:51 crc kubenswrapper[4910]: I0105 23:26:51.199734 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 05 23:26:51 crc kubenswrapper[4910]: I0105 23:26:51.259146 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"07d1adea-bd0b-4e0a-a673-b20a56d68a20","Type":"ContainerStarted","Data":"421b50b9725ac775286115b1b1925a1d119b2031dde26f3511aade000a95264a"} Jan 05 23:26:51 crc kubenswrapper[4910]: I0105 23:26:51.284620 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.284605465 podStartE2EDuration="3.284605465s" podCreationTimestamp="2026-01-05 23:26:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:26:51.281927458 +0000 UTC m=+5742.859425128" watchObservedRunningTime="2026-01-05 23:26:51.284605465 +0000 UTC m=+5742.862103135" Jan 05 23:26:53 crc kubenswrapper[4910]: I0105 23:26:53.611881 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 05 23:26:58 crc kubenswrapper[4910]: I0105 23:26:58.947660 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 05 23:27:10 crc kubenswrapper[4910]: I0105 23:27:10.952356 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:27:10 crc kubenswrapper[4910]: I0105 23:27:10.953039 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:27:35 crc kubenswrapper[4910]: I0105 23:27:35.274025 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pgv4m"] Jan 05 23:27:35 crc kubenswrapper[4910]: I0105 23:27:35.278989 4910 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:35 crc kubenswrapper[4910]: I0105 23:27:35.291558 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pgv4m"] Jan 05 23:27:35 crc kubenswrapper[4910]: I0105 23:27:35.461967 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16547cc-0dd4-4488-b5ac-a18ef25060f8-utilities\") pod \"community-operators-pgv4m\" (UID: \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\") " pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:35 crc kubenswrapper[4910]: I0105 23:27:35.462201 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fv5x\" (UniqueName: \"kubernetes.io/projected/b16547cc-0dd4-4488-b5ac-a18ef25060f8-kube-api-access-2fv5x\") pod \"community-operators-pgv4m\" (UID: \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\") " pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:35 crc kubenswrapper[4910]: I0105 23:27:35.462277 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16547cc-0dd4-4488-b5ac-a18ef25060f8-catalog-content\") pod \"community-operators-pgv4m\" (UID: \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\") " pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:35 crc kubenswrapper[4910]: I0105 23:27:35.563772 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fv5x\" (UniqueName: \"kubernetes.io/projected/b16547cc-0dd4-4488-b5ac-a18ef25060f8-kube-api-access-2fv5x\") pod \"community-operators-pgv4m\" (UID: \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\") " pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:35 crc kubenswrapper[4910]: I0105 23:27:35.563859 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16547cc-0dd4-4488-b5ac-a18ef25060f8-catalog-content\") pod \"community-operators-pgv4m\" (UID: \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\") " pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:35 crc kubenswrapper[4910]: I0105 23:27:35.563978 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16547cc-0dd4-4488-b5ac-a18ef25060f8-utilities\") pod \"community-operators-pgv4m\" (UID: \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\") " pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:35 crc kubenswrapper[4910]: I0105 23:27:35.564582 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16547cc-0dd4-4488-b5ac-a18ef25060f8-utilities\") pod \"community-operators-pgv4m\" (UID: \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\") " pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:35 crc kubenswrapper[4910]: I0105 23:27:35.564770 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16547cc-0dd4-4488-b5ac-a18ef25060f8-catalog-content\") pod \"community-operators-pgv4m\" (UID: \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\") " pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:35 crc kubenswrapper[4910]: I0105 
23:27:35.590220 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fv5x\" (UniqueName: \"kubernetes.io/projected/b16547cc-0dd4-4488-b5ac-a18ef25060f8-kube-api-access-2fv5x\") pod \"community-operators-pgv4m\" (UID: \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\") " pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:35 crc kubenswrapper[4910]: I0105 23:27:35.613291 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:36 crc kubenswrapper[4910]: I0105 23:27:36.270872 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pgv4m"] Jan 05 23:27:36 crc kubenswrapper[4910]: I0105 23:27:36.970567 4910 generic.go:334] "Generic (PLEG): container finished" podID="b16547cc-0dd4-4488-b5ac-a18ef25060f8" containerID="dc1c324966bf6d3ee199a6250e158ac808f62313bbad9c1ed4fd841effec5bbf" exitCode=0 Jan 05 23:27:36 crc kubenswrapper[4910]: I0105 23:27:36.970642 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pgv4m" event={"ID":"b16547cc-0dd4-4488-b5ac-a18ef25060f8","Type":"ContainerDied","Data":"dc1c324966bf6d3ee199a6250e158ac808f62313bbad9c1ed4fd841effec5bbf"} Jan 05 23:27:36 crc kubenswrapper[4910]: I0105 23:27:36.970877 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pgv4m" event={"ID":"b16547cc-0dd4-4488-b5ac-a18ef25060f8","Type":"ContainerStarted","Data":"08f9a0c378cff6f4b6ef2b202b19458bf39604839ce8d6552a14e0bd47dc9400"} Jan 05 23:27:39 crc kubenswrapper[4910]: I0105 23:27:39.001755 4910 generic.go:334] "Generic (PLEG): container finished" podID="b16547cc-0dd4-4488-b5ac-a18ef25060f8" containerID="b8745930d53a2d383e578a2f544723e0788e8064b44e0f45224031bd02c125ed" exitCode=0 Jan 05 23:27:39 crc kubenswrapper[4910]: I0105 23:27:39.001837 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pgv4m" event={"ID":"b16547cc-0dd4-4488-b5ac-a18ef25060f8","Type":"ContainerDied","Data":"b8745930d53a2d383e578a2f544723e0788e8064b44e0f45224031bd02c125ed"} Jan 05 23:27:40 crc kubenswrapper[4910]: I0105 23:27:40.017880 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pgv4m" event={"ID":"b16547cc-0dd4-4488-b5ac-a18ef25060f8","Type":"ContainerStarted","Data":"7f0546f662dd1758034e69f59a222b80e9208a0d7edf664a40a892ec56035df4"} Jan 05 23:27:40 crc kubenswrapper[4910]: I0105 23:27:40.047819 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pgv4m" podStartSLOduration=2.47497147 podStartE2EDuration="5.047790783s" podCreationTimestamp="2026-01-05 23:27:35 +0000 UTC" firstStartedPulling="2026-01-05 23:27:36.973087779 +0000 UTC m=+5788.550585479" lastFinishedPulling="2026-01-05 23:27:39.545907122 +0000 UTC m=+5791.123404792" observedRunningTime="2026-01-05 23:27:40.035043047 +0000 UTC m=+5791.612540727" watchObservedRunningTime="2026-01-05 23:27:40.047790783 +0000 UTC m=+5791.625288463" Jan 05 23:27:40 crc kubenswrapper[4910]: I0105 23:27:40.952010 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:27:40 crc kubenswrapper[4910]: 
I0105 23:27:40.952361 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:27:40 crc kubenswrapper[4910]: I0105 23:27:40.952423 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 23:27:40 crc kubenswrapper[4910]: I0105 23:27:40.953554 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0e240e4effc2bd679e0f96fec5bc054d5530ae8a8dd2bd9c82e2bc521473387b"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 23:27:40 crc kubenswrapper[4910]: I0105 23:27:40.953685 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://0e240e4effc2bd679e0f96fec5bc054d5530ae8a8dd2bd9c82e2bc521473387b" gracePeriod=600 Jan 05 23:27:42 crc kubenswrapper[4910]: I0105 23:27:42.036377 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="0e240e4effc2bd679e0f96fec5bc054d5530ae8a8dd2bd9c82e2bc521473387b" exitCode=0 Jan 05 23:27:42 crc kubenswrapper[4910]: I0105 23:27:42.036440 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"0e240e4effc2bd679e0f96fec5bc054d5530ae8a8dd2bd9c82e2bc521473387b"} Jan 05 23:27:42 crc kubenswrapper[4910]: I0105 23:27:42.036878 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514"} Jan 05 23:27:42 crc kubenswrapper[4910]: I0105 23:27:42.036903 4910 scope.go:117] "RemoveContainer" containerID="c31f0b3ea3ce0d4b0fb62da87fc6d8144c3940f5821592c646ec514147dae31e" Jan 05 23:27:45 crc kubenswrapper[4910]: I0105 23:27:45.613514 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:45 crc kubenswrapper[4910]: I0105 23:27:45.614610 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:45 crc kubenswrapper[4910]: I0105 23:27:45.698460 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:46 crc kubenswrapper[4910]: I0105 23:27:46.173853 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:46 crc kubenswrapper[4910]: I0105 23:27:46.255790 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pgv4m"] Jan 05 23:27:48 crc kubenswrapper[4910]: I0105 23:27:48.111993 4910 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openshift-marketplace/community-operators-pgv4m" podUID="b16547cc-0dd4-4488-b5ac-a18ef25060f8" containerName="registry-server" containerID="cri-o://7f0546f662dd1758034e69f59a222b80e9208a0d7edf664a40a892ec56035df4" gracePeriod=2 Jan 05 23:27:48 crc kubenswrapper[4910]: I0105 23:27:48.580968 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:48 crc kubenswrapper[4910]: I0105 23:27:48.774982 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fv5x\" (UniqueName: \"kubernetes.io/projected/b16547cc-0dd4-4488-b5ac-a18ef25060f8-kube-api-access-2fv5x\") pod \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\" (UID: \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\") " Jan 05 23:27:48 crc kubenswrapper[4910]: I0105 23:27:48.775212 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16547cc-0dd4-4488-b5ac-a18ef25060f8-utilities\") pod \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\" (UID: \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\") " Jan 05 23:27:48 crc kubenswrapper[4910]: I0105 23:27:48.775458 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16547cc-0dd4-4488-b5ac-a18ef25060f8-catalog-content\") pod \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\" (UID: \"b16547cc-0dd4-4488-b5ac-a18ef25060f8\") " Jan 05 23:27:48 crc kubenswrapper[4910]: I0105 23:27:48.777277 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b16547cc-0dd4-4488-b5ac-a18ef25060f8-utilities" (OuterVolumeSpecName: "utilities") pod "b16547cc-0dd4-4488-b5ac-a18ef25060f8" (UID: "b16547cc-0dd4-4488-b5ac-a18ef25060f8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:27:48 crc kubenswrapper[4910]: I0105 23:27:48.785595 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b16547cc-0dd4-4488-b5ac-a18ef25060f8-kube-api-access-2fv5x" (OuterVolumeSpecName: "kube-api-access-2fv5x") pod "b16547cc-0dd4-4488-b5ac-a18ef25060f8" (UID: "b16547cc-0dd4-4488-b5ac-a18ef25060f8"). InnerVolumeSpecName "kube-api-access-2fv5x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:27:48 crc kubenswrapper[4910]: I0105 23:27:48.879230 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fv5x\" (UniqueName: \"kubernetes.io/projected/b16547cc-0dd4-4488-b5ac-a18ef25060f8-kube-api-access-2fv5x\") on node \"crc\" DevicePath \"\"" Jan 05 23:27:48 crc kubenswrapper[4910]: I0105 23:27:48.879292 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16547cc-0dd4-4488-b5ac-a18ef25060f8-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.054323 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b16547cc-0dd4-4488-b5ac-a18ef25060f8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b16547cc-0dd4-4488-b5ac-a18ef25060f8" (UID: "b16547cc-0dd4-4488-b5ac-a18ef25060f8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.084548 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16547cc-0dd4-4488-b5ac-a18ef25060f8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.130087 4910 generic.go:334] "Generic (PLEG): container finished" podID="b16547cc-0dd4-4488-b5ac-a18ef25060f8" containerID="7f0546f662dd1758034e69f59a222b80e9208a0d7edf664a40a892ec56035df4" exitCode=0 Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.130189 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pgv4m" event={"ID":"b16547cc-0dd4-4488-b5ac-a18ef25060f8","Type":"ContainerDied","Data":"7f0546f662dd1758034e69f59a222b80e9208a0d7edf664a40a892ec56035df4"} Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.131340 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pgv4m" event={"ID":"b16547cc-0dd4-4488-b5ac-a18ef25060f8","Type":"ContainerDied","Data":"08f9a0c378cff6f4b6ef2b202b19458bf39604839ce8d6552a14e0bd47dc9400"} Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.131380 4910 scope.go:117] "RemoveContainer" containerID="7f0546f662dd1758034e69f59a222b80e9208a0d7edf664a40a892ec56035df4" Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.130343 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pgv4m" Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.180961 4910 scope.go:117] "RemoveContainer" containerID="b8745930d53a2d383e578a2f544723e0788e8064b44e0f45224031bd02c125ed" Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.200280 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pgv4m"] Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.221946 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pgv4m"] Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.230565 4910 scope.go:117] "RemoveContainer" containerID="dc1c324966bf6d3ee199a6250e158ac808f62313bbad9c1ed4fd841effec5bbf" Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.278988 4910 scope.go:117] "RemoveContainer" containerID="7f0546f662dd1758034e69f59a222b80e9208a0d7edf664a40a892ec56035df4" Jan 05 23:27:49 crc kubenswrapper[4910]: E0105 23:27:49.279965 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f0546f662dd1758034e69f59a222b80e9208a0d7edf664a40a892ec56035df4\": container with ID starting with 7f0546f662dd1758034e69f59a222b80e9208a0d7edf664a40a892ec56035df4 not found: ID does not exist" containerID="7f0546f662dd1758034e69f59a222b80e9208a0d7edf664a40a892ec56035df4" Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.280036 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f0546f662dd1758034e69f59a222b80e9208a0d7edf664a40a892ec56035df4"} err="failed to get container status \"7f0546f662dd1758034e69f59a222b80e9208a0d7edf664a40a892ec56035df4\": rpc error: code = NotFound desc = could not find container \"7f0546f662dd1758034e69f59a222b80e9208a0d7edf664a40a892ec56035df4\": container with ID starting with 7f0546f662dd1758034e69f59a222b80e9208a0d7edf664a40a892ec56035df4 not found: ID does not exist" Jan 05 
23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.280073 4910 scope.go:117] "RemoveContainer" containerID="b8745930d53a2d383e578a2f544723e0788e8064b44e0f45224031bd02c125ed" Jan 05 23:27:49 crc kubenswrapper[4910]: E0105 23:27:49.280539 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8745930d53a2d383e578a2f544723e0788e8064b44e0f45224031bd02c125ed\": container with ID starting with b8745930d53a2d383e578a2f544723e0788e8064b44e0f45224031bd02c125ed not found: ID does not exist" containerID="b8745930d53a2d383e578a2f544723e0788e8064b44e0f45224031bd02c125ed" Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.280583 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8745930d53a2d383e578a2f544723e0788e8064b44e0f45224031bd02c125ed"} err="failed to get container status \"b8745930d53a2d383e578a2f544723e0788e8064b44e0f45224031bd02c125ed\": rpc error: code = NotFound desc = could not find container \"b8745930d53a2d383e578a2f544723e0788e8064b44e0f45224031bd02c125ed\": container with ID starting with b8745930d53a2d383e578a2f544723e0788e8064b44e0f45224031bd02c125ed not found: ID does not exist" Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.280611 4910 scope.go:117] "RemoveContainer" containerID="dc1c324966bf6d3ee199a6250e158ac808f62313bbad9c1ed4fd841effec5bbf" Jan 05 23:27:49 crc kubenswrapper[4910]: E0105 23:27:49.281854 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc1c324966bf6d3ee199a6250e158ac808f62313bbad9c1ed4fd841effec5bbf\": container with ID starting with dc1c324966bf6d3ee199a6250e158ac808f62313bbad9c1ed4fd841effec5bbf not found: ID does not exist" containerID="dc1c324966bf6d3ee199a6250e158ac808f62313bbad9c1ed4fd841effec5bbf" Jan 05 23:27:49 crc kubenswrapper[4910]: I0105 23:27:49.281901 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc1c324966bf6d3ee199a6250e158ac808f62313bbad9c1ed4fd841effec5bbf"} err="failed to get container status \"dc1c324966bf6d3ee199a6250e158ac808f62313bbad9c1ed4fd841effec5bbf\": rpc error: code = NotFound desc = could not find container \"dc1c324966bf6d3ee199a6250e158ac808f62313bbad9c1ed4fd841effec5bbf\": container with ID starting with dc1c324966bf6d3ee199a6250e158ac808f62313bbad9c1ed4fd841effec5bbf not found: ID does not exist" Jan 05 23:27:50 crc kubenswrapper[4910]: I0105 23:27:50.738291 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b16547cc-0dd4-4488-b5ac-a18ef25060f8" path="/var/lib/kubelet/pods/b16547cc-0dd4-4488-b5ac-a18ef25060f8/volumes" Jan 05 23:28:18 crc kubenswrapper[4910]: I0105 23:28:18.070519 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-zvqpd"] Jan 05 23:28:18 crc kubenswrapper[4910]: I0105 23:28:18.083832 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-zvqpd"] Jan 05 23:28:18 crc kubenswrapper[4910]: I0105 23:28:18.090742 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-ddd0-account-create-update-tvcfw"] Jan 05 23:28:18 crc kubenswrapper[4910]: I0105 23:28:18.100727 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-ddd0-account-create-update-tvcfw"] Jan 05 23:28:18 crc kubenswrapper[4910]: I0105 23:28:18.741495 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="764cc26a-724f-470e-abf1-5b1a2339da16" path="/var/lib/kubelet/pods/764cc26a-724f-470e-abf1-5b1a2339da16/volumes" Jan 05 23:28:18 crc kubenswrapper[4910]: I0105 23:28:18.742080 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2c91f15-103b-471f-8a4b-d6bb07712a59" path="/var/lib/kubelet/pods/e2c91f15-103b-471f-8a4b-d6bb07712a59/volumes" Jan 05 23:28:24 crc kubenswrapper[4910]: I0105 23:28:24.041544 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-mfj9l"] Jan 05 23:28:24 crc kubenswrapper[4910]: I0105 23:28:24.048687 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-mfj9l"] Jan 05 23:28:24 crc kubenswrapper[4910]: I0105 23:28:24.742683 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2a5de11-93f1-4057-bd1e-f5752a9ffc19" path="/var/lib/kubelet/pods/a2a5de11-93f1-4057-bd1e-f5752a9ffc19/volumes" Jan 05 23:28:38 crc kubenswrapper[4910]: I0105 23:28:38.055878 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-2527p"] Jan 05 23:28:38 crc kubenswrapper[4910]: I0105 23:28:38.072565 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-2527p"] Jan 05 23:28:38 crc kubenswrapper[4910]: I0105 23:28:38.740259 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00d270eb-35a0-49cc-b90a-2f0b0c0c2acf" path="/var/lib/kubelet/pods/00d270eb-35a0-49cc-b90a-2f0b0c0c2acf/volumes" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.527716 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-s2ngj"] Jan 05 23:28:40 crc kubenswrapper[4910]: E0105 23:28:40.530145 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b16547cc-0dd4-4488-b5ac-a18ef25060f8" containerName="registry-server" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.530171 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b16547cc-0dd4-4488-b5ac-a18ef25060f8" containerName="registry-server" Jan 05 23:28:40 crc kubenswrapper[4910]: E0105 23:28:40.530193 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b16547cc-0dd4-4488-b5ac-a18ef25060f8" containerName="extract-content" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.530322 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b16547cc-0dd4-4488-b5ac-a18ef25060f8" containerName="extract-content" Jan 05 23:28:40 crc kubenswrapper[4910]: E0105 23:28:40.530461 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b16547cc-0dd4-4488-b5ac-a18ef25060f8" containerName="extract-utilities" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.530474 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b16547cc-0dd4-4488-b5ac-a18ef25060f8" containerName="extract-utilities" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.532163 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b16547cc-0dd4-4488-b5ac-a18ef25060f8" containerName="registry-server" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.536061 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.539555 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-fzgx2" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.540510 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.552220 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s2ngj"] Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.585919 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwt47\" (UniqueName: \"kubernetes.io/projected/63f387cd-1877-425f-8e52-5fe854426c89-kube-api-access-kwt47\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.586005 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/63f387cd-1877-425f-8e52-5fe854426c89-var-run\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.586044 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/63f387cd-1877-425f-8e52-5fe854426c89-var-run-ovn\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.586101 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/63f387cd-1877-425f-8e52-5fe854426c89-scripts\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.586148 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/63f387cd-1877-425f-8e52-5fe854426c89-var-log-ovn\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.592041 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-gn4cl"] Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.594237 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.628036 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-gn4cl"] Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.688129 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/63f387cd-1877-425f-8e52-5fe854426c89-var-log-ovn\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.688234 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-etc-ovs\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.688307 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-var-run\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.688347 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwk7p\" (UniqueName: \"kubernetes.io/projected/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-kube-api-access-gwk7p\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.688384 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwt47\" (UniqueName: \"kubernetes.io/projected/63f387cd-1877-425f-8e52-5fe854426c89-kube-api-access-kwt47\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.688419 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/63f387cd-1877-425f-8e52-5fe854426c89-var-run\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.688464 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/63f387cd-1877-425f-8e52-5fe854426c89-var-run-ovn\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.688518 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-scripts\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.688538 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-var-lib\") pod 
\"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.688569 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/63f387cd-1877-425f-8e52-5fe854426c89-scripts\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.688572 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/63f387cd-1877-425f-8e52-5fe854426c89-var-log-ovn\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.688589 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-var-log\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.688737 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/63f387cd-1877-425f-8e52-5fe854426c89-var-run\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.689174 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/63f387cd-1877-425f-8e52-5fe854426c89-var-run-ovn\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.691758 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/63f387cd-1877-425f-8e52-5fe854426c89-scripts\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.712479 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwt47\" (UniqueName: \"kubernetes.io/projected/63f387cd-1877-425f-8e52-5fe854426c89-kube-api-access-kwt47\") pod \"ovn-controller-s2ngj\" (UID: \"63f387cd-1877-425f-8e52-5fe854426c89\") " pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.790729 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-scripts\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.790777 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-var-lib\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.790818 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" 
(UniqueName: \"kubernetes.io/host-path/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-var-log\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.790880 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-etc-ovs\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.790949 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-var-run\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.790993 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwk7p\" (UniqueName: \"kubernetes.io/projected/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-kube-api-access-gwk7p\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.791055 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-var-log\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.791086 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-etc-ovs\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.791111 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-var-lib\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.791194 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-var-run\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.792621 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-scripts\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.813752 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwk7p\" (UniqueName: \"kubernetes.io/projected/4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e-kube-api-access-gwk7p\") pod \"ovn-controller-ovs-gn4cl\" (UID: \"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e\") " pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:40 crc 
kubenswrapper[4910]: I0105 23:28:40.874418 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:40 crc kubenswrapper[4910]: I0105 23:28:40.931402 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:41 crc kubenswrapper[4910]: I0105 23:28:41.479803 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s2ngj"] Jan 05 23:28:41 crc kubenswrapper[4910]: I0105 23:28:41.758483 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-gn4cl"] Jan 05 23:28:41 crc kubenswrapper[4910]: I0105 23:28:41.811643 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s2ngj" event={"ID":"63f387cd-1877-425f-8e52-5fe854426c89","Type":"ContainerStarted","Data":"2a1efd7dd438ab71132eb1faab48decad4826fda8b1af6da5c4869c924892ec3"} Jan 05 23:28:41 crc kubenswrapper[4910]: I0105 23:28:41.812840 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-gn4cl" event={"ID":"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e","Type":"ContainerStarted","Data":"0a0fa6b6e10c193afa6943382f92979b9314b4e622a246dde38fc986e3cc07ac"} Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.113593 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-bbzzm"] Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.115731 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.117694 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.122461 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-bbzzm"] Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.270956 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/dbf59dc9-3276-47cc-8637-829289d0ba8c-ovs-rundir\") pod \"ovn-controller-metrics-bbzzm\" (UID: \"dbf59dc9-3276-47cc-8637-829289d0ba8c\") " pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.271007 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/dbf59dc9-3276-47cc-8637-829289d0ba8c-ovn-rundir\") pod \"ovn-controller-metrics-bbzzm\" (UID: \"dbf59dc9-3276-47cc-8637-829289d0ba8c\") " pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.271046 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zwrr\" (UniqueName: \"kubernetes.io/projected/dbf59dc9-3276-47cc-8637-829289d0ba8c-kube-api-access-8zwrr\") pod \"ovn-controller-metrics-bbzzm\" (UID: \"dbf59dc9-3276-47cc-8637-829289d0ba8c\") " pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.271069 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbf59dc9-3276-47cc-8637-829289d0ba8c-config\") pod \"ovn-controller-metrics-bbzzm\" (UID: \"dbf59dc9-3276-47cc-8637-829289d0ba8c\") " 
pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.372334 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/dbf59dc9-3276-47cc-8637-829289d0ba8c-ovs-rundir\") pod \"ovn-controller-metrics-bbzzm\" (UID: \"dbf59dc9-3276-47cc-8637-829289d0ba8c\") " pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.372393 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/dbf59dc9-3276-47cc-8637-829289d0ba8c-ovn-rundir\") pod \"ovn-controller-metrics-bbzzm\" (UID: \"dbf59dc9-3276-47cc-8637-829289d0ba8c\") " pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.372440 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zwrr\" (UniqueName: \"kubernetes.io/projected/dbf59dc9-3276-47cc-8637-829289d0ba8c-kube-api-access-8zwrr\") pod \"ovn-controller-metrics-bbzzm\" (UID: \"dbf59dc9-3276-47cc-8637-829289d0ba8c\") " pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.372463 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbf59dc9-3276-47cc-8637-829289d0ba8c-config\") pod \"ovn-controller-metrics-bbzzm\" (UID: \"dbf59dc9-3276-47cc-8637-829289d0ba8c\") " pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.372706 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/dbf59dc9-3276-47cc-8637-829289d0ba8c-ovs-rundir\") pod \"ovn-controller-metrics-bbzzm\" (UID: \"dbf59dc9-3276-47cc-8637-829289d0ba8c\") " pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.372749 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/dbf59dc9-3276-47cc-8637-829289d0ba8c-ovn-rundir\") pod \"ovn-controller-metrics-bbzzm\" (UID: \"dbf59dc9-3276-47cc-8637-829289d0ba8c\") " pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.373271 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbf59dc9-3276-47cc-8637-829289d0ba8c-config\") pod \"ovn-controller-metrics-bbzzm\" (UID: \"dbf59dc9-3276-47cc-8637-829289d0ba8c\") " pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.392279 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zwrr\" (UniqueName: \"kubernetes.io/projected/dbf59dc9-3276-47cc-8637-829289d0ba8c-kube-api-access-8zwrr\") pod \"ovn-controller-metrics-bbzzm\" (UID: \"dbf59dc9-3276-47cc-8637-829289d0ba8c\") " pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.479076 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-bbzzm" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.535663 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-create-7hvhq"] Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.537176 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-create-7hvhq" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.552177 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-7hvhq"] Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.678470 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfjlh\" (UniqueName: \"kubernetes.io/projected/c52cd7ca-bfd0-4427-96c8-2bb374ac756d-kube-api-access-qfjlh\") pod \"octavia-db-create-7hvhq\" (UID: \"c52cd7ca-bfd0-4427-96c8-2bb374ac756d\") " pod="openstack/octavia-db-create-7hvhq" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.678540 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52cd7ca-bfd0-4427-96c8-2bb374ac756d-operator-scripts\") pod \"octavia-db-create-7hvhq\" (UID: \"c52cd7ca-bfd0-4427-96c8-2bb374ac756d\") " pod="openstack/octavia-db-create-7hvhq" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.780463 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52cd7ca-bfd0-4427-96c8-2bb374ac756d-operator-scripts\") pod \"octavia-db-create-7hvhq\" (UID: \"c52cd7ca-bfd0-4427-96c8-2bb374ac756d\") " pod="openstack/octavia-db-create-7hvhq" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.781724 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfjlh\" (UniqueName: \"kubernetes.io/projected/c52cd7ca-bfd0-4427-96c8-2bb374ac756d-kube-api-access-qfjlh\") pod \"octavia-db-create-7hvhq\" (UID: \"c52cd7ca-bfd0-4427-96c8-2bb374ac756d\") " pod="openstack/octavia-db-create-7hvhq" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.781727 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52cd7ca-bfd0-4427-96c8-2bb374ac756d-operator-scripts\") pod \"octavia-db-create-7hvhq\" (UID: \"c52cd7ca-bfd0-4427-96c8-2bb374ac756d\") " pod="openstack/octavia-db-create-7hvhq" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.798722 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfjlh\" (UniqueName: \"kubernetes.io/projected/c52cd7ca-bfd0-4427-96c8-2bb374ac756d-kube-api-access-qfjlh\") pod \"octavia-db-create-7hvhq\" (UID: \"c52cd7ca-bfd0-4427-96c8-2bb374ac756d\") " pod="openstack/octavia-db-create-7hvhq" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.825448 4910 generic.go:334] "Generic (PLEG): container finished" podID="4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e" containerID="6e3eff8d3ec4b39162f96435808d5e0fa87654ec97904ec523d5d7e6e8930de1" exitCode=0 Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.826095 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-gn4cl" event={"ID":"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e","Type":"ContainerDied","Data":"6e3eff8d3ec4b39162f96435808d5e0fa87654ec97904ec523d5d7e6e8930de1"} Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.829637 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s2ngj" event={"ID":"63f387cd-1877-425f-8e52-5fe854426c89","Type":"ContainerStarted","Data":"ae2dca333535d10becdac2f1ae41ca188d0e24ce47ea02ea7b3ee4908aa84e7f"} Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.829731 4910 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/ovn-controller-s2ngj" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.861136 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-7hvhq" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.868164 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-s2ngj" podStartSLOduration=2.868145952 podStartE2EDuration="2.868145952s" podCreationTimestamp="2026-01-05 23:28:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:28:42.865082136 +0000 UTC m=+5854.442579806" watchObservedRunningTime="2026-01-05 23:28:42.868145952 +0000 UTC m=+5854.445643622" Jan 05 23:28:42 crc kubenswrapper[4910]: I0105 23:28:42.955937 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-bbzzm"] Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.356902 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-7hvhq"] Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.839772 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-bbzzm" event={"ID":"dbf59dc9-3276-47cc-8637-829289d0ba8c","Type":"ContainerStarted","Data":"b3134687ad22a9a2882ee96ff8dab9f6a3e62fab1c412977922f4f7a4f942e75"} Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.840247 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-bbzzm" event={"ID":"dbf59dc9-3276-47cc-8637-829289d0ba8c","Type":"ContainerStarted","Data":"cfc2af788fb535363378eaa828d597237bd2fe5f65986cce24833297f4af08f4"} Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.843165 4910 generic.go:334] "Generic (PLEG): container finished" podID="c52cd7ca-bfd0-4427-96c8-2bb374ac756d" containerID="a434e15b571fd7736aae4419c8f461fdf4b437a2181e36fd4d09cb5f516b7895" exitCode=0 Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.843307 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-7hvhq" event={"ID":"c52cd7ca-bfd0-4427-96c8-2bb374ac756d","Type":"ContainerDied","Data":"a434e15b571fd7736aae4419c8f461fdf4b437a2181e36fd4d09cb5f516b7895"} Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.843382 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-7hvhq" event={"ID":"c52cd7ca-bfd0-4427-96c8-2bb374ac756d","Type":"ContainerStarted","Data":"03e69c3a263753914485006dc7aca01f4cfd7bdadd62e191b8aef71741db708c"} Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.846779 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-gn4cl" event={"ID":"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e","Type":"ContainerStarted","Data":"c32ff5ce2c2ccb3e944c0605285ccab1ad49c60cd00534b95f018c874b977808"} Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.846931 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-gn4cl" event={"ID":"4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e","Type":"ContainerStarted","Data":"fc61ae4dedc37f652c45a47a614831d0738b073275b64307a52f422e6f09ba5b"} Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.859532 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-5828-account-create-update-f98gq"] Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.860915 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-5828-account-create-update-f98gq" Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.863156 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-db-secret" Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.863944 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-bbzzm" podStartSLOduration=1.863921645 podStartE2EDuration="1.863921645s" podCreationTimestamp="2026-01-05 23:28:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:28:43.860303405 +0000 UTC m=+5855.437801085" watchObservedRunningTime="2026-01-05 23:28:43.863921645 +0000 UTC m=+5855.441419325" Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.892852 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-5828-account-create-update-f98gq"] Jan 05 23:28:43 crc kubenswrapper[4910]: I0105 23:28:43.900387 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-gn4cl" podStartSLOduration=3.900366708 podStartE2EDuration="3.900366708s" podCreationTimestamp="2026-01-05 23:28:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:28:43.889810726 +0000 UTC m=+5855.467308396" watchObservedRunningTime="2026-01-05 23:28:43.900366708 +0000 UTC m=+5855.477864378" Jan 05 23:28:44 crc kubenswrapper[4910]: I0105 23:28:44.013531 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lsdgc\" (UniqueName: \"kubernetes.io/projected/b4f2da7e-a606-4a4e-bfed-8605e842ccbc-kube-api-access-lsdgc\") pod \"octavia-5828-account-create-update-f98gq\" (UID: \"b4f2da7e-a606-4a4e-bfed-8605e842ccbc\") " pod="openstack/octavia-5828-account-create-update-f98gq" Jan 05 23:28:44 crc kubenswrapper[4910]: I0105 23:28:44.013642 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4f2da7e-a606-4a4e-bfed-8605e842ccbc-operator-scripts\") pod \"octavia-5828-account-create-update-f98gq\" (UID: \"b4f2da7e-a606-4a4e-bfed-8605e842ccbc\") " pod="openstack/octavia-5828-account-create-update-f98gq" Jan 05 23:28:44 crc kubenswrapper[4910]: I0105 23:28:44.115943 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4f2da7e-a606-4a4e-bfed-8605e842ccbc-operator-scripts\") pod \"octavia-5828-account-create-update-f98gq\" (UID: \"b4f2da7e-a606-4a4e-bfed-8605e842ccbc\") " pod="openstack/octavia-5828-account-create-update-f98gq" Jan 05 23:28:44 crc kubenswrapper[4910]: I0105 23:28:44.116202 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lsdgc\" (UniqueName: \"kubernetes.io/projected/b4f2da7e-a606-4a4e-bfed-8605e842ccbc-kube-api-access-lsdgc\") pod \"octavia-5828-account-create-update-f98gq\" (UID: \"b4f2da7e-a606-4a4e-bfed-8605e842ccbc\") " pod="openstack/octavia-5828-account-create-update-f98gq" Jan 05 23:28:44 crc kubenswrapper[4910]: I0105 23:28:44.117410 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4f2da7e-a606-4a4e-bfed-8605e842ccbc-operator-scripts\") pod 
\"octavia-5828-account-create-update-f98gq\" (UID: \"b4f2da7e-a606-4a4e-bfed-8605e842ccbc\") " pod="openstack/octavia-5828-account-create-update-f98gq" Jan 05 23:28:44 crc kubenswrapper[4910]: I0105 23:28:44.151016 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lsdgc\" (UniqueName: \"kubernetes.io/projected/b4f2da7e-a606-4a4e-bfed-8605e842ccbc-kube-api-access-lsdgc\") pod \"octavia-5828-account-create-update-f98gq\" (UID: \"b4f2da7e-a606-4a4e-bfed-8605e842ccbc\") " pod="openstack/octavia-5828-account-create-update-f98gq" Jan 05 23:28:44 crc kubenswrapper[4910]: I0105 23:28:44.180670 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-5828-account-create-update-f98gq" Jan 05 23:28:44 crc kubenswrapper[4910]: W0105 23:28:44.713874 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4f2da7e_a606_4a4e_bfed_8605e842ccbc.slice/crio-eba8929d5c628b09b85cb69ec185e57c1c2f341593e1efabffbe0c7b955ae637 WatchSource:0}: Error finding container eba8929d5c628b09b85cb69ec185e57c1c2f341593e1efabffbe0c7b955ae637: Status 404 returned error can't find the container with id eba8929d5c628b09b85cb69ec185e57c1c2f341593e1efabffbe0c7b955ae637 Jan 05 23:28:44 crc kubenswrapper[4910]: I0105 23:28:44.714486 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-5828-account-create-update-f98gq"] Jan 05 23:28:44 crc kubenswrapper[4910]: I0105 23:28:44.860920 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-5828-account-create-update-f98gq" event={"ID":"b4f2da7e-a606-4a4e-bfed-8605e842ccbc","Type":"ContainerStarted","Data":"eba8929d5c628b09b85cb69ec185e57c1c2f341593e1efabffbe0c7b955ae637"} Jan 05 23:28:44 crc kubenswrapper[4910]: I0105 23:28:44.862327 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:44 crc kubenswrapper[4910]: I0105 23:28:44.862475 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:28:45 crc kubenswrapper[4910]: I0105 23:28:45.235910 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-7hvhq" Jan 05 23:28:45 crc kubenswrapper[4910]: I0105 23:28:45.344348 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfjlh\" (UniqueName: \"kubernetes.io/projected/c52cd7ca-bfd0-4427-96c8-2bb374ac756d-kube-api-access-qfjlh\") pod \"c52cd7ca-bfd0-4427-96c8-2bb374ac756d\" (UID: \"c52cd7ca-bfd0-4427-96c8-2bb374ac756d\") " Jan 05 23:28:45 crc kubenswrapper[4910]: I0105 23:28:45.344462 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52cd7ca-bfd0-4427-96c8-2bb374ac756d-operator-scripts\") pod \"c52cd7ca-bfd0-4427-96c8-2bb374ac756d\" (UID: \"c52cd7ca-bfd0-4427-96c8-2bb374ac756d\") " Jan 05 23:28:45 crc kubenswrapper[4910]: I0105 23:28:45.345830 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c52cd7ca-bfd0-4427-96c8-2bb374ac756d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c52cd7ca-bfd0-4427-96c8-2bb374ac756d" (UID: "c52cd7ca-bfd0-4427-96c8-2bb374ac756d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:28:45 crc kubenswrapper[4910]: I0105 23:28:45.353495 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c52cd7ca-bfd0-4427-96c8-2bb374ac756d-kube-api-access-qfjlh" (OuterVolumeSpecName: "kube-api-access-qfjlh") pod "c52cd7ca-bfd0-4427-96c8-2bb374ac756d" (UID: "c52cd7ca-bfd0-4427-96c8-2bb374ac756d"). InnerVolumeSpecName "kube-api-access-qfjlh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:28:45 crc kubenswrapper[4910]: I0105 23:28:45.447832 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfjlh\" (UniqueName: \"kubernetes.io/projected/c52cd7ca-bfd0-4427-96c8-2bb374ac756d-kube-api-access-qfjlh\") on node \"crc\" DevicePath \"\"" Jan 05 23:28:45 crc kubenswrapper[4910]: I0105 23:28:45.447871 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52cd7ca-bfd0-4427-96c8-2bb374ac756d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:28:45 crc kubenswrapper[4910]: I0105 23:28:45.872170 4910 generic.go:334] "Generic (PLEG): container finished" podID="b4f2da7e-a606-4a4e-bfed-8605e842ccbc" containerID="7a6156750d98992bba0b583a4ff8f76dd35d2da6b068aec266d478435baf5ef8" exitCode=0 Jan 05 23:28:45 crc kubenswrapper[4910]: I0105 23:28:45.872599 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-5828-account-create-update-f98gq" event={"ID":"b4f2da7e-a606-4a4e-bfed-8605e842ccbc","Type":"ContainerDied","Data":"7a6156750d98992bba0b583a4ff8f76dd35d2da6b068aec266d478435baf5ef8"} Jan 05 23:28:45 crc kubenswrapper[4910]: I0105 23:28:45.873954 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-7hvhq" event={"ID":"c52cd7ca-bfd0-4427-96c8-2bb374ac756d","Type":"ContainerDied","Data":"03e69c3a263753914485006dc7aca01f4cfd7bdadd62e191b8aef71741db708c"} Jan 05 23:28:45 crc kubenswrapper[4910]: I0105 23:28:45.873976 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03e69c3a263753914485006dc7aca01f4cfd7bdadd62e191b8aef71741db708c" Jan 05 23:28:45 crc kubenswrapper[4910]: I0105 23:28:45.874061 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-7hvhq" Jan 05 23:28:46 crc kubenswrapper[4910]: I0105 23:28:46.248421 4910 scope.go:117] "RemoveContainer" containerID="df2d865949d3681db76f318e2b3ba32ad48a3b8b2f8946e10aaf2dcd443ce42c" Jan 05 23:28:46 crc kubenswrapper[4910]: I0105 23:28:46.278951 4910 scope.go:117] "RemoveContainer" containerID="1e420b4243fd90e6496d0d09bf20ff32be3ff121b6f6d92898005bc2d4b4d096" Jan 05 23:28:46 crc kubenswrapper[4910]: I0105 23:28:46.331701 4910 scope.go:117] "RemoveContainer" containerID="c92fb655cbf1202d690e79f6136e212ae5edb49b92b2c2ddbcef8ff18b8f3fc4" Jan 05 23:28:46 crc kubenswrapper[4910]: I0105 23:28:46.375996 4910 scope.go:117] "RemoveContainer" containerID="a9b37fdb4f8017e42ee2858351a369f458880c81d4608d761ec1b89f9d314b4b" Jan 05 23:28:47 crc kubenswrapper[4910]: I0105 23:28:47.387190 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-5828-account-create-update-f98gq" Jan 05 23:28:47 crc kubenswrapper[4910]: I0105 23:28:47.489900 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4f2da7e-a606-4a4e-bfed-8605e842ccbc-operator-scripts\") pod \"b4f2da7e-a606-4a4e-bfed-8605e842ccbc\" (UID: \"b4f2da7e-a606-4a4e-bfed-8605e842ccbc\") " Jan 05 23:28:47 crc kubenswrapper[4910]: I0105 23:28:47.490180 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lsdgc\" (UniqueName: \"kubernetes.io/projected/b4f2da7e-a606-4a4e-bfed-8605e842ccbc-kube-api-access-lsdgc\") pod \"b4f2da7e-a606-4a4e-bfed-8605e842ccbc\" (UID: \"b4f2da7e-a606-4a4e-bfed-8605e842ccbc\") " Jan 05 23:28:47 crc kubenswrapper[4910]: I0105 23:28:47.490596 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4f2da7e-a606-4a4e-bfed-8605e842ccbc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b4f2da7e-a606-4a4e-bfed-8605e842ccbc" (UID: "b4f2da7e-a606-4a4e-bfed-8605e842ccbc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:28:47 crc kubenswrapper[4910]: I0105 23:28:47.491116 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4f2da7e-a606-4a4e-bfed-8605e842ccbc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:28:47 crc kubenswrapper[4910]: I0105 23:28:47.496533 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4f2da7e-a606-4a4e-bfed-8605e842ccbc-kube-api-access-lsdgc" (OuterVolumeSpecName: "kube-api-access-lsdgc") pod "b4f2da7e-a606-4a4e-bfed-8605e842ccbc" (UID: "b4f2da7e-a606-4a4e-bfed-8605e842ccbc"). InnerVolumeSpecName "kube-api-access-lsdgc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:28:47 crc kubenswrapper[4910]: I0105 23:28:47.593993 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lsdgc\" (UniqueName: \"kubernetes.io/projected/b4f2da7e-a606-4a4e-bfed-8605e842ccbc-kube-api-access-lsdgc\") on node \"crc\" DevicePath \"\"" Jan 05 23:28:47 crc kubenswrapper[4910]: I0105 23:28:47.902077 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-5828-account-create-update-f98gq" event={"ID":"b4f2da7e-a606-4a4e-bfed-8605e842ccbc","Type":"ContainerDied","Data":"eba8929d5c628b09b85cb69ec185e57c1c2f341593e1efabffbe0c7b955ae637"} Jan 05 23:28:47 crc kubenswrapper[4910]: I0105 23:28:47.902493 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eba8929d5c628b09b85cb69ec185e57c1c2f341593e1efabffbe0c7b955ae637" Jan 05 23:28:47 crc kubenswrapper[4910]: I0105 23:28:47.902174 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-5828-account-create-update-f98gq" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.038911 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-persistence-db-create-6vb2b"] Jan 05 23:28:49 crc kubenswrapper[4910]: E0105 23:28:49.039676 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4f2da7e-a606-4a4e-bfed-8605e842ccbc" containerName="mariadb-account-create-update" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.039691 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4f2da7e-a606-4a4e-bfed-8605e842ccbc" containerName="mariadb-account-create-update" Jan 05 23:28:49 crc kubenswrapper[4910]: E0105 23:28:49.039735 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c52cd7ca-bfd0-4427-96c8-2bb374ac756d" containerName="mariadb-database-create" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.039743 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c52cd7ca-bfd0-4427-96c8-2bb374ac756d" containerName="mariadb-database-create" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.039972 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4f2da7e-a606-4a4e-bfed-8605e842ccbc" containerName="mariadb-account-create-update" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.039992 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c52cd7ca-bfd0-4427-96c8-2bb374ac756d" containerName="mariadb-database-create" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.040756 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-6vb2b" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.060113 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-6vb2b"] Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.229882 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmwfb\" (UniqueName: \"kubernetes.io/projected/437835eb-8eda-421e-8346-f8068f46e658-kube-api-access-rmwfb\") pod \"octavia-persistence-db-create-6vb2b\" (UID: \"437835eb-8eda-421e-8346-f8068f46e658\") " pod="openstack/octavia-persistence-db-create-6vb2b" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.230722 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/437835eb-8eda-421e-8346-f8068f46e658-operator-scripts\") pod \"octavia-persistence-db-create-6vb2b\" (UID: \"437835eb-8eda-421e-8346-f8068f46e658\") " pod="openstack/octavia-persistence-db-create-6vb2b" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.332768 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmwfb\" (UniqueName: \"kubernetes.io/projected/437835eb-8eda-421e-8346-f8068f46e658-kube-api-access-rmwfb\") pod \"octavia-persistence-db-create-6vb2b\" (UID: \"437835eb-8eda-421e-8346-f8068f46e658\") " pod="openstack/octavia-persistence-db-create-6vb2b" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.332828 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/437835eb-8eda-421e-8346-f8068f46e658-operator-scripts\") pod \"octavia-persistence-db-create-6vb2b\" (UID: \"437835eb-8eda-421e-8346-f8068f46e658\") " 
pod="openstack/octavia-persistence-db-create-6vb2b" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.333635 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/437835eb-8eda-421e-8346-f8068f46e658-operator-scripts\") pod \"octavia-persistence-db-create-6vb2b\" (UID: \"437835eb-8eda-421e-8346-f8068f46e658\") " pod="openstack/octavia-persistence-db-create-6vb2b" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.357932 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmwfb\" (UniqueName: \"kubernetes.io/projected/437835eb-8eda-421e-8346-f8068f46e658-kube-api-access-rmwfb\") pod \"octavia-persistence-db-create-6vb2b\" (UID: \"437835eb-8eda-421e-8346-f8068f46e658\") " pod="openstack/octavia-persistence-db-create-6vb2b" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.367581 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-6vb2b" Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.866745 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-6vb2b"] Jan 05 23:28:49 crc kubenswrapper[4910]: I0105 23:28:49.939735 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-6vb2b" event={"ID":"437835eb-8eda-421e-8346-f8068f46e658","Type":"ContainerStarted","Data":"a821d9d73e7140e219ca2ed0a32b26c86829658a5fad38a792d0a4c658e99bd1"} Jan 05 23:28:50 crc kubenswrapper[4910]: I0105 23:28:50.388756 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-c475-account-create-update-7gdht"] Jan 05 23:28:50 crc kubenswrapper[4910]: I0105 23:28:50.391304 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-c475-account-create-update-7gdht" Jan 05 23:28:50 crc kubenswrapper[4910]: I0105 23:28:50.394098 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-persistence-db-secret" Jan 05 23:28:50 crc kubenswrapper[4910]: I0105 23:28:50.417853 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-c475-account-create-update-7gdht"] Jan 05 23:28:50 crc kubenswrapper[4910]: I0105 23:28:50.560869 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e077484e-3a30-46f8-abf2-dc5d267fc72a-operator-scripts\") pod \"octavia-c475-account-create-update-7gdht\" (UID: \"e077484e-3a30-46f8-abf2-dc5d267fc72a\") " pod="openstack/octavia-c475-account-create-update-7gdht" Jan 05 23:28:50 crc kubenswrapper[4910]: I0105 23:28:50.560934 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8w4t\" (UniqueName: \"kubernetes.io/projected/e077484e-3a30-46f8-abf2-dc5d267fc72a-kube-api-access-p8w4t\") pod \"octavia-c475-account-create-update-7gdht\" (UID: \"e077484e-3a30-46f8-abf2-dc5d267fc72a\") " pod="openstack/octavia-c475-account-create-update-7gdht" Jan 05 23:28:50 crc kubenswrapper[4910]: I0105 23:28:50.662366 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e077484e-3a30-46f8-abf2-dc5d267fc72a-operator-scripts\") pod \"octavia-c475-account-create-update-7gdht\" (UID: \"e077484e-3a30-46f8-abf2-dc5d267fc72a\") " pod="openstack/octavia-c475-account-create-update-7gdht" Jan 05 23:28:50 crc kubenswrapper[4910]: I0105 23:28:50.662456 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8w4t\" (UniqueName: \"kubernetes.io/projected/e077484e-3a30-46f8-abf2-dc5d267fc72a-kube-api-access-p8w4t\") pod \"octavia-c475-account-create-update-7gdht\" (UID: \"e077484e-3a30-46f8-abf2-dc5d267fc72a\") " pod="openstack/octavia-c475-account-create-update-7gdht" Jan 05 23:28:50 crc kubenswrapper[4910]: I0105 23:28:50.663393 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e077484e-3a30-46f8-abf2-dc5d267fc72a-operator-scripts\") pod \"octavia-c475-account-create-update-7gdht\" (UID: \"e077484e-3a30-46f8-abf2-dc5d267fc72a\") " pod="openstack/octavia-c475-account-create-update-7gdht" Jan 05 23:28:50 crc kubenswrapper[4910]: I0105 23:28:50.693238 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8w4t\" (UniqueName: \"kubernetes.io/projected/e077484e-3a30-46f8-abf2-dc5d267fc72a-kube-api-access-p8w4t\") pod \"octavia-c475-account-create-update-7gdht\" (UID: \"e077484e-3a30-46f8-abf2-dc5d267fc72a\") " pod="openstack/octavia-c475-account-create-update-7gdht" Jan 05 23:28:50 crc kubenswrapper[4910]: I0105 23:28:50.721496 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-c475-account-create-update-7gdht" Jan 05 23:28:50 crc kubenswrapper[4910]: I0105 23:28:50.951729 4910 generic.go:334] "Generic (PLEG): container finished" podID="437835eb-8eda-421e-8346-f8068f46e658" containerID="810574ec4c93b14b089672e0b6a92f77a23905799ba106662d8606d683771008" exitCode=0 Jan 05 23:28:50 crc kubenswrapper[4910]: I0105 23:28:50.952002 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-6vb2b" event={"ID":"437835eb-8eda-421e-8346-f8068f46e658","Type":"ContainerDied","Data":"810574ec4c93b14b089672e0b6a92f77a23905799ba106662d8606d683771008"} Jan 05 23:28:51 crc kubenswrapper[4910]: I0105 23:28:51.195865 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-c475-account-create-update-7gdht"] Jan 05 23:28:51 crc kubenswrapper[4910]: W0105 23:28:51.203240 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode077484e_3a30_46f8_abf2_dc5d267fc72a.slice/crio-168895d73eaddf4264e02229dbf4412f382e66873e98ca62d367444dd2e9bfcb WatchSource:0}: Error finding container 168895d73eaddf4264e02229dbf4412f382e66873e98ca62d367444dd2e9bfcb: Status 404 returned error can't find the container with id 168895d73eaddf4264e02229dbf4412f382e66873e98ca62d367444dd2e9bfcb Jan 05 23:28:51 crc kubenswrapper[4910]: I0105 23:28:51.966864 4910 generic.go:334] "Generic (PLEG): container finished" podID="e077484e-3a30-46f8-abf2-dc5d267fc72a" containerID="3b774d1715ddc3b1baa2080e164b94526f5e6509f680439a5170e186f92bf7e5" exitCode=0 Jan 05 23:28:51 crc kubenswrapper[4910]: I0105 23:28:51.966960 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-c475-account-create-update-7gdht" event={"ID":"e077484e-3a30-46f8-abf2-dc5d267fc72a","Type":"ContainerDied","Data":"3b774d1715ddc3b1baa2080e164b94526f5e6509f680439a5170e186f92bf7e5"} Jan 05 23:28:51 crc kubenswrapper[4910]: I0105 23:28:51.967273 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-c475-account-create-update-7gdht" event={"ID":"e077484e-3a30-46f8-abf2-dc5d267fc72a","Type":"ContainerStarted","Data":"168895d73eaddf4264e02229dbf4412f382e66873e98ca62d367444dd2e9bfcb"} Jan 05 23:28:52 crc kubenswrapper[4910]: I0105 23:28:52.472276 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-6vb2b" Jan 05 23:28:52 crc kubenswrapper[4910]: I0105 23:28:52.615488 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/437835eb-8eda-421e-8346-f8068f46e658-operator-scripts\") pod \"437835eb-8eda-421e-8346-f8068f46e658\" (UID: \"437835eb-8eda-421e-8346-f8068f46e658\") " Jan 05 23:28:52 crc kubenswrapper[4910]: I0105 23:28:52.615820 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmwfb\" (UniqueName: \"kubernetes.io/projected/437835eb-8eda-421e-8346-f8068f46e658-kube-api-access-rmwfb\") pod \"437835eb-8eda-421e-8346-f8068f46e658\" (UID: \"437835eb-8eda-421e-8346-f8068f46e658\") " Jan 05 23:28:52 crc kubenswrapper[4910]: I0105 23:28:52.616453 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/437835eb-8eda-421e-8346-f8068f46e658-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "437835eb-8eda-421e-8346-f8068f46e658" (UID: "437835eb-8eda-421e-8346-f8068f46e658"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:28:52 crc kubenswrapper[4910]: I0105 23:28:52.616607 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/437835eb-8eda-421e-8346-f8068f46e658-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:28:52 crc kubenswrapper[4910]: I0105 23:28:52.622571 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/437835eb-8eda-421e-8346-f8068f46e658-kube-api-access-rmwfb" (OuterVolumeSpecName: "kube-api-access-rmwfb") pod "437835eb-8eda-421e-8346-f8068f46e658" (UID: "437835eb-8eda-421e-8346-f8068f46e658"). InnerVolumeSpecName "kube-api-access-rmwfb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:28:52 crc kubenswrapper[4910]: I0105 23:28:52.718762 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmwfb\" (UniqueName: \"kubernetes.io/projected/437835eb-8eda-421e-8346-f8068f46e658-kube-api-access-rmwfb\") on node \"crc\" DevicePath \"\"" Jan 05 23:28:52 crc kubenswrapper[4910]: I0105 23:28:52.984538 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-6vb2b" event={"ID":"437835eb-8eda-421e-8346-f8068f46e658","Type":"ContainerDied","Data":"a821d9d73e7140e219ca2ed0a32b26c86829658a5fad38a792d0a4c658e99bd1"} Jan 05 23:28:52 crc kubenswrapper[4910]: I0105 23:28:52.984617 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a821d9d73e7140e219ca2ed0a32b26c86829658a5fad38a792d0a4c658e99bd1" Jan 05 23:28:52 crc kubenswrapper[4910]: I0105 23:28:52.984796 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-6vb2b" Jan 05 23:28:53 crc kubenswrapper[4910]: I0105 23:28:53.473099 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-c475-account-create-update-7gdht" Jan 05 23:28:53 crc kubenswrapper[4910]: I0105 23:28:53.657803 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8w4t\" (UniqueName: \"kubernetes.io/projected/e077484e-3a30-46f8-abf2-dc5d267fc72a-kube-api-access-p8w4t\") pod \"e077484e-3a30-46f8-abf2-dc5d267fc72a\" (UID: \"e077484e-3a30-46f8-abf2-dc5d267fc72a\") " Jan 05 23:28:53 crc kubenswrapper[4910]: I0105 23:28:53.657975 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e077484e-3a30-46f8-abf2-dc5d267fc72a-operator-scripts\") pod \"e077484e-3a30-46f8-abf2-dc5d267fc72a\" (UID: \"e077484e-3a30-46f8-abf2-dc5d267fc72a\") " Jan 05 23:28:53 crc kubenswrapper[4910]: I0105 23:28:53.659091 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e077484e-3a30-46f8-abf2-dc5d267fc72a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e077484e-3a30-46f8-abf2-dc5d267fc72a" (UID: "e077484e-3a30-46f8-abf2-dc5d267fc72a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:28:53 crc kubenswrapper[4910]: I0105 23:28:53.677689 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e077484e-3a30-46f8-abf2-dc5d267fc72a-kube-api-access-p8w4t" (OuterVolumeSpecName: "kube-api-access-p8w4t") pod "e077484e-3a30-46f8-abf2-dc5d267fc72a" (UID: "e077484e-3a30-46f8-abf2-dc5d267fc72a"). InnerVolumeSpecName "kube-api-access-p8w4t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:28:53 crc kubenswrapper[4910]: I0105 23:28:53.761917 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8w4t\" (UniqueName: \"kubernetes.io/projected/e077484e-3a30-46f8-abf2-dc5d267fc72a-kube-api-access-p8w4t\") on node \"crc\" DevicePath \"\"" Jan 05 23:28:53 crc kubenswrapper[4910]: I0105 23:28:53.761982 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e077484e-3a30-46f8-abf2-dc5d267fc72a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:28:54 crc kubenswrapper[4910]: I0105 23:28:54.000819 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-c475-account-create-update-7gdht" event={"ID":"e077484e-3a30-46f8-abf2-dc5d267fc72a","Type":"ContainerDied","Data":"168895d73eaddf4264e02229dbf4412f382e66873e98ca62d367444dd2e9bfcb"} Jan 05 23:28:54 crc kubenswrapper[4910]: I0105 23:28:54.001154 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="168895d73eaddf4264e02229dbf4412f382e66873e98ca62d367444dd2e9bfcb" Jan 05 23:28:54 crc kubenswrapper[4910]: I0105 23:28:54.000944 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-c475-account-create-update-7gdht" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.769364 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-584c6dd788-6g8mm"] Jan 05 23:28:56 crc kubenswrapper[4910]: E0105 23:28:56.774542 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e077484e-3a30-46f8-abf2-dc5d267fc72a" containerName="mariadb-account-create-update" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.774571 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e077484e-3a30-46f8-abf2-dc5d267fc72a" containerName="mariadb-account-create-update" Jan 05 23:28:56 crc kubenswrapper[4910]: E0105 23:28:56.774596 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="437835eb-8eda-421e-8346-f8068f46e658" containerName="mariadb-database-create" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.774603 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="437835eb-8eda-421e-8346-f8068f46e658" containerName="mariadb-database-create" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.776134 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="437835eb-8eda-421e-8346-f8068f46e658" containerName="mariadb-database-create" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.776157 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e077484e-3a30-46f8-abf2-dc5d267fc72a" containerName="mariadb-account-create-update" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.777765 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-584c6dd788-6g8mm"] Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.777883 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.782916 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-config-data" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.783098 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-octavia-dockercfg-vsbsh" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.789189 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-scripts" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.853806 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/e8831b55-8e5e-4003-ac04-31b3af151959-octavia-run\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.853877 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8831b55-8e5e-4003-ac04-31b3af151959-config-data\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.853939 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8831b55-8e5e-4003-ac04-31b3af151959-combined-ca-bundle\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.853995 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/e8831b55-8e5e-4003-ac04-31b3af151959-config-data-merged\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.854056 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8831b55-8e5e-4003-ac04-31b3af151959-scripts\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.956462 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/e8831b55-8e5e-4003-ac04-31b3af151959-octavia-run\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.956528 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8831b55-8e5e-4003-ac04-31b3af151959-config-data\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.956590 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/e8831b55-8e5e-4003-ac04-31b3af151959-combined-ca-bundle\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.956650 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/e8831b55-8e5e-4003-ac04-31b3af151959-config-data-merged\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.956715 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8831b55-8e5e-4003-ac04-31b3af151959-scripts\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.957441 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/e8831b55-8e5e-4003-ac04-31b3af151959-octavia-run\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.958040 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/e8831b55-8e5e-4003-ac04-31b3af151959-config-data-merged\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.967785 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8831b55-8e5e-4003-ac04-31b3af151959-config-data\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.970480 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8831b55-8e5e-4003-ac04-31b3af151959-scripts\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:56 crc kubenswrapper[4910]: I0105 23:28:56.971959 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8831b55-8e5e-4003-ac04-31b3af151959-combined-ca-bundle\") pod \"octavia-api-584c6dd788-6g8mm\" (UID: \"e8831b55-8e5e-4003-ac04-31b3af151959\") " pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:57 crc kubenswrapper[4910]: I0105 23:28:57.107213 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:28:57 crc kubenswrapper[4910]: I0105 23:28:57.576086 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-584c6dd788-6g8mm"] Jan 05 23:28:58 crc kubenswrapper[4910]: I0105 23:28:58.046357 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-584c6dd788-6g8mm" event={"ID":"e8831b55-8e5e-4003-ac04-31b3af151959","Type":"ContainerStarted","Data":"fe4bf3c403c37487fefc40e521240c90881d916a1b2f3702849aa99e824b3468"} Jan 05 23:29:06 crc kubenswrapper[4910]: I0105 23:29:06.169178 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-584c6dd788-6g8mm" event={"ID":"e8831b55-8e5e-4003-ac04-31b3af151959","Type":"ContainerStarted","Data":"371c12af24ada9edd2183c5b20b7ff1ceb85e8a869d45858dff33593843d35a2"} Jan 05 23:29:07 crc kubenswrapper[4910]: I0105 23:29:07.179892 4910 generic.go:334] "Generic (PLEG): container finished" podID="e8831b55-8e5e-4003-ac04-31b3af151959" containerID="371c12af24ada9edd2183c5b20b7ff1ceb85e8a869d45858dff33593843d35a2" exitCode=0 Jan 05 23:29:07 crc kubenswrapper[4910]: I0105 23:29:07.180139 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-584c6dd788-6g8mm" event={"ID":"e8831b55-8e5e-4003-ac04-31b3af151959","Type":"ContainerDied","Data":"371c12af24ada9edd2183c5b20b7ff1ceb85e8a869d45858dff33593843d35a2"} Jan 05 23:29:08 crc kubenswrapper[4910]: I0105 23:29:08.194580 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-584c6dd788-6g8mm" event={"ID":"e8831b55-8e5e-4003-ac04-31b3af151959","Type":"ContainerStarted","Data":"1c9e85ea7c49cc062101bef5a00dc5b520f35804b5b09d2995394293028aced9"} Jan 05 23:29:08 crc kubenswrapper[4910]: I0105 23:29:08.195490 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-584c6dd788-6g8mm" event={"ID":"e8831b55-8e5e-4003-ac04-31b3af151959","Type":"ContainerStarted","Data":"732932f8093632e3dcabbd873fb127fe1fbca0244ec41664af3130ac8f413e00"} Jan 05 23:29:08 crc kubenswrapper[4910]: I0105 23:29:08.195514 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:29:08 crc kubenswrapper[4910]: I0105 23:29:08.217651 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-api-584c6dd788-6g8mm" podStartSLOduration=3.915130628 podStartE2EDuration="12.217625212s" podCreationTimestamp="2026-01-05 23:28:56 +0000 UTC" firstStartedPulling="2026-01-05 23:28:57.5875716 +0000 UTC m=+5869.165069280" lastFinishedPulling="2026-01-05 23:29:05.890066154 +0000 UTC m=+5877.467563864" observedRunningTime="2026-01-05 23:29:08.213219102 +0000 UTC m=+5879.790716792" watchObservedRunningTime="2026-01-05 23:29:08.217625212 +0000 UTC m=+5879.795122902" Jan 05 23:29:09 crc kubenswrapper[4910]: I0105 23:29:09.205551 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.290326 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-rsyslog-qkp4b"] Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.292078 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.295567 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"octavia-hmport-map" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.303484 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-config-data" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.304484 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-scripts" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.305269 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-qkp4b"] Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.400299 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b05718f5-8878-4646-beaa-0cea45fcfda9-scripts\") pod \"octavia-rsyslog-qkp4b\" (UID: \"b05718f5-8878-4646-beaa-0cea45fcfda9\") " pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.400378 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/b05718f5-8878-4646-beaa-0cea45fcfda9-config-data-merged\") pod \"octavia-rsyslog-qkp4b\" (UID: \"b05718f5-8878-4646-beaa-0cea45fcfda9\") " pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.400425 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/b05718f5-8878-4646-beaa-0cea45fcfda9-hm-ports\") pod \"octavia-rsyslog-qkp4b\" (UID: \"b05718f5-8878-4646-beaa-0cea45fcfda9\") " pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.400453 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b05718f5-8878-4646-beaa-0cea45fcfda9-config-data\") pod \"octavia-rsyslog-qkp4b\" (UID: \"b05718f5-8878-4646-beaa-0cea45fcfda9\") " pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.502619 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b05718f5-8878-4646-beaa-0cea45fcfda9-scripts\") pod \"octavia-rsyslog-qkp4b\" (UID: \"b05718f5-8878-4646-beaa-0cea45fcfda9\") " pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.502783 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/b05718f5-8878-4646-beaa-0cea45fcfda9-config-data-merged\") pod \"octavia-rsyslog-qkp4b\" (UID: \"b05718f5-8878-4646-beaa-0cea45fcfda9\") " pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.502869 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/b05718f5-8878-4646-beaa-0cea45fcfda9-hm-ports\") pod \"octavia-rsyslog-qkp4b\" (UID: \"b05718f5-8878-4646-beaa-0cea45fcfda9\") " pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.502919 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b05718f5-8878-4646-beaa-0cea45fcfda9-config-data\") pod \"octavia-rsyslog-qkp4b\" (UID: \"b05718f5-8878-4646-beaa-0cea45fcfda9\") " pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.503568 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/b05718f5-8878-4646-beaa-0cea45fcfda9-config-data-merged\") pod \"octavia-rsyslog-qkp4b\" (UID: \"b05718f5-8878-4646-beaa-0cea45fcfda9\") " pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.504981 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/b05718f5-8878-4646-beaa-0cea45fcfda9-hm-ports\") pod \"octavia-rsyslog-qkp4b\" (UID: \"b05718f5-8878-4646-beaa-0cea45fcfda9\") " pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.529515 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b05718f5-8878-4646-beaa-0cea45fcfda9-scripts\") pod \"octavia-rsyslog-qkp4b\" (UID: \"b05718f5-8878-4646-beaa-0cea45fcfda9\") " pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.530905 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b05718f5-8878-4646-beaa-0cea45fcfda9-config-data\") pod \"octavia-rsyslog-qkp4b\" (UID: \"b05718f5-8878-4646-beaa-0cea45fcfda9\") " pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:11 crc kubenswrapper[4910]: I0105 23:29:11.658029 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:12 crc kubenswrapper[4910]: I0105 23:29:12.140829 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-597bd57878-ctmfl"] Jan 05 23:29:12 crc kubenswrapper[4910]: I0105 23:29:12.143985 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-597bd57878-ctmfl" Jan 05 23:29:12 crc kubenswrapper[4910]: I0105 23:29:12.152822 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Jan 05 23:29:12 crc kubenswrapper[4910]: I0105 23:29:12.155793 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-597bd57878-ctmfl"] Jan 05 23:29:12 crc kubenswrapper[4910]: I0105 23:29:12.226327 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6c6d4163-66d2-490d-9788-726d0585fecf-httpd-config\") pod \"octavia-image-upload-597bd57878-ctmfl\" (UID: \"6c6d4163-66d2-490d-9788-726d0585fecf\") " pod="openstack/octavia-image-upload-597bd57878-ctmfl" Jan 05 23:29:12 crc kubenswrapper[4910]: I0105 23:29:12.226517 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/6c6d4163-66d2-490d-9788-726d0585fecf-amphora-image\") pod \"octavia-image-upload-597bd57878-ctmfl\" (UID: \"6c6d4163-66d2-490d-9788-726d0585fecf\") " pod="openstack/octavia-image-upload-597bd57878-ctmfl" Jan 05 23:29:12 crc kubenswrapper[4910]: I0105 23:29:12.285658 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-qkp4b"] Jan 05 23:29:12 crc kubenswrapper[4910]: I0105 23:29:12.329561 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/6c6d4163-66d2-490d-9788-726d0585fecf-amphora-image\") pod \"octavia-image-upload-597bd57878-ctmfl\" (UID: \"6c6d4163-66d2-490d-9788-726d0585fecf\") " pod="openstack/octavia-image-upload-597bd57878-ctmfl" Jan 05 23:29:12 crc kubenswrapper[4910]: I0105 23:29:12.329657 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6c6d4163-66d2-490d-9788-726d0585fecf-httpd-config\") pod \"octavia-image-upload-597bd57878-ctmfl\" (UID: \"6c6d4163-66d2-490d-9788-726d0585fecf\") " pod="openstack/octavia-image-upload-597bd57878-ctmfl" Jan 05 23:29:12 crc kubenswrapper[4910]: I0105 23:29:12.331611 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/6c6d4163-66d2-490d-9788-726d0585fecf-amphora-image\") pod \"octavia-image-upload-597bd57878-ctmfl\" (UID: \"6c6d4163-66d2-490d-9788-726d0585fecf\") " pod="openstack/octavia-image-upload-597bd57878-ctmfl" Jan 05 23:29:12 crc kubenswrapper[4910]: I0105 23:29:12.341170 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6c6d4163-66d2-490d-9788-726d0585fecf-httpd-config\") pod \"octavia-image-upload-597bd57878-ctmfl\" (UID: \"6c6d4163-66d2-490d-9788-726d0585fecf\") " pod="openstack/octavia-image-upload-597bd57878-ctmfl" Jan 05 23:29:12 crc kubenswrapper[4910]: I0105 23:29:12.471177 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-597bd57878-ctmfl" Jan 05 23:29:12 crc kubenswrapper[4910]: I0105 23:29:12.976792 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-597bd57878-ctmfl"] Jan 05 23:29:12 crc kubenswrapper[4910]: W0105 23:29:12.978375 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c6d4163_66d2_490d_9788_726d0585fecf.slice/crio-a1a53c03a6096d19f7c6dab06567385486989f0ec8484533ebd77d1c1fd1e1f6 WatchSource:0}: Error finding container a1a53c03a6096d19f7c6dab06567385486989f0ec8484533ebd77d1c1fd1e1f6: Status 404 returned error can't find the container with id a1a53c03a6096d19f7c6dab06567385486989f0ec8484533ebd77d1c1fd1e1f6 Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.253519 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-qkp4b" event={"ID":"b05718f5-8878-4646-beaa-0cea45fcfda9","Type":"ContainerStarted","Data":"e0c221d3a40b2c05f0e231fb4f5989e4da5e3bd5c7fdd55c12fe123fecedffd2"} Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.255395 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-597bd57878-ctmfl" event={"ID":"6c6d4163-66d2-490d-9788-726d0585fecf","Type":"ContainerStarted","Data":"a1a53c03a6096d19f7c6dab06567385486989f0ec8484533ebd77d1c1fd1e1f6"} Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.510298 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-sync-fdhs8"] Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.513319 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.517411 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-scripts" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.548893 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-fdhs8"] Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.573279 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-scripts\") pod \"octavia-db-sync-fdhs8\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.573369 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-combined-ca-bundle\") pod \"octavia-db-sync-fdhs8\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.573412 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-config-data\") pod \"octavia-db-sync-fdhs8\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.573491 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-config-data-merged\") pod \"octavia-db-sync-fdhs8\" (UID: 
\"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.675270 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-scripts\") pod \"octavia-db-sync-fdhs8\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.675389 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-combined-ca-bundle\") pod \"octavia-db-sync-fdhs8\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.675424 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-config-data\") pod \"octavia-db-sync-fdhs8\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.675507 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-config-data-merged\") pod \"octavia-db-sync-fdhs8\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.676075 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-config-data-merged\") pod \"octavia-db-sync-fdhs8\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.683607 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-scripts\") pod \"octavia-db-sync-fdhs8\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.685564 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-config-data\") pod \"octavia-db-sync-fdhs8\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.685787 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-combined-ca-bundle\") pod \"octavia-db-sync-fdhs8\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:13 crc kubenswrapper[4910]: I0105 23:29:13.890917 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:14 crc kubenswrapper[4910]: I0105 23:29:14.888743 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-fdhs8"] Jan 05 23:29:15 crc kubenswrapper[4910]: W0105 23:29:15.061727 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a5a592c_13c1_4fe0_bb2e_b75bc57a4d1f.slice/crio-6f91b5e9d3c4debe7099afb550e515dc16bc47ec53d8441f0db89b68adbd5e91 WatchSource:0}: Error finding container 6f91b5e9d3c4debe7099afb550e515dc16bc47ec53d8441f0db89b68adbd5e91: Status 404 returned error can't find the container with id 6f91b5e9d3c4debe7099afb550e515dc16bc47ec53d8441f0db89b68adbd5e91 Jan 05 23:29:15 crc kubenswrapper[4910]: I0105 23:29:15.277706 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-fdhs8" event={"ID":"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f","Type":"ContainerStarted","Data":"6f91b5e9d3c4debe7099afb550e515dc16bc47ec53d8441f0db89b68adbd5e91"} Jan 05 23:29:15 crc kubenswrapper[4910]: I0105 23:29:15.929502 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-s2ngj" podUID="63f387cd-1877-425f-8e52-5fe854426c89" containerName="ovn-controller" probeResult="failure" output=< Jan 05 23:29:15 crc kubenswrapper[4910]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 05 23:29:15 crc kubenswrapper[4910]: > Jan 05 23:29:15 crc kubenswrapper[4910]: I0105 23:29:15.981788 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:29:15 crc kubenswrapper[4910]: I0105 23:29:15.991243 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-gn4cl" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.136789 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-s2ngj-config-gk7v4"] Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.138905 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.142105 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.161137 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s2ngj-config-gk7v4"] Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.232308 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-run\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.232351 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3366d11e-656d-47cb-8e7f-327bb77f5ec9-scripts\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.232383 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3366d11e-656d-47cb-8e7f-327bb77f5ec9-additional-scripts\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.232420 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.232433 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-log-ovn\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.232476 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-run-ovn\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.232547 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gp22h\" (UniqueName: \"kubernetes.io/projected/3366d11e-656d-47cb-8e7f-327bb77f5ec9-kube-api-access-gp22h\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.334497 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gp22h\" (UniqueName: \"kubernetes.io/projected/3366d11e-656d-47cb-8e7f-327bb77f5ec9-kube-api-access-gp22h\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 
23:29:16.334643 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-run\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.334664 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3366d11e-656d-47cb-8e7f-327bb77f5ec9-scripts\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.334696 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3366d11e-656d-47cb-8e7f-327bb77f5ec9-additional-scripts\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.334735 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-log-ovn\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.334784 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-run-ovn\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.335561 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-run-ovn\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.335664 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-run\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.335677 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-log-ovn\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.336367 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3366d11e-656d-47cb-8e7f-327bb77f5ec9-additional-scripts\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.337085 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3366d11e-656d-47cb-8e7f-327bb77f5ec9-scripts\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.354483 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gp22h\" (UniqueName: \"kubernetes.io/projected/3366d11e-656d-47cb-8e7f-327bb77f5ec9-kube-api-access-gp22h\") pod \"ovn-controller-s2ngj-config-gk7v4\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.462160 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:16 crc kubenswrapper[4910]: I0105 23:29:16.648506 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-584c6dd788-6g8mm" Jan 05 23:29:17 crc kubenswrapper[4910]: I0105 23:29:17.263023 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s2ngj-config-gk7v4"] Jan 05 23:29:17 crc kubenswrapper[4910]: I0105 23:29:17.298275 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-qkp4b" event={"ID":"b05718f5-8878-4646-beaa-0cea45fcfda9","Type":"ContainerStarted","Data":"f5c98c66f6b7bcc3c0407b8e5b1f018796bd91fab4c844f75ab4ca053aa7b896"} Jan 05 23:29:17 crc kubenswrapper[4910]: I0105 23:29:17.305422 4910 generic.go:334] "Generic (PLEG): container finished" podID="0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f" containerID="042a784e7eaa163c9f4f4f613360a1c8267f39dcc4fd02190dccf1399d2a6b25" exitCode=0 Jan 05 23:29:17 crc kubenswrapper[4910]: I0105 23:29:17.306148 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-fdhs8" event={"ID":"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f","Type":"ContainerDied","Data":"042a784e7eaa163c9f4f4f613360a1c8267f39dcc4fd02190dccf1399d2a6b25"} Jan 05 23:29:17 crc kubenswrapper[4910]: I0105 23:29:17.308149 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s2ngj-config-gk7v4" event={"ID":"3366d11e-656d-47cb-8e7f-327bb77f5ec9","Type":"ContainerStarted","Data":"a0e6f5d78b232dd40a60a3e2d9851249000097b4803566d7e0bf58d1f24941df"} Jan 05 23:29:18 crc kubenswrapper[4910]: I0105 23:29:18.319691 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-fdhs8" event={"ID":"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f","Type":"ContainerStarted","Data":"fb984cee4982f54b303e7582ed586d6a948cfe4da81d6f22e3a8e049b5dd583b"} Jan 05 23:29:18 crc kubenswrapper[4910]: I0105 23:29:18.334209 4910 generic.go:334] "Generic (PLEG): container finished" podID="3366d11e-656d-47cb-8e7f-327bb77f5ec9" containerID="ded6a8464e4069c2bb30e9c5db3f6707e373fd87a2528b9dfa68c70a0e3df5be" exitCode=0 Jan 05 23:29:18 crc kubenswrapper[4910]: I0105 23:29:18.334305 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s2ngj-config-gk7v4" event={"ID":"3366d11e-656d-47cb-8e7f-327bb77f5ec9","Type":"ContainerDied","Data":"ded6a8464e4069c2bb30e9c5db3f6707e373fd87a2528b9dfa68c70a0e3df5be"} Jan 05 23:29:18 crc kubenswrapper[4910]: I0105 23:29:18.347145 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-db-sync-fdhs8" podStartSLOduration=5.347110767 
podStartE2EDuration="5.347110767s" podCreationTimestamp="2026-01-05 23:29:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:29:18.338558185 +0000 UTC m=+5889.916055855" watchObservedRunningTime="2026-01-05 23:29:18.347110767 +0000 UTC m=+5889.924608437" Jan 05 23:29:19 crc kubenswrapper[4910]: I0105 23:29:19.371759 4910 generic.go:334] "Generic (PLEG): container finished" podID="b05718f5-8878-4646-beaa-0cea45fcfda9" containerID="f5c98c66f6b7bcc3c0407b8e5b1f018796bd91fab4c844f75ab4ca053aa7b896" exitCode=0 Jan 05 23:29:19 crc kubenswrapper[4910]: I0105 23:29:19.371853 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-qkp4b" event={"ID":"b05718f5-8878-4646-beaa-0cea45fcfda9","Type":"ContainerDied","Data":"f5c98c66f6b7bcc3c0407b8e5b1f018796bd91fab4c844f75ab4ca053aa7b896"} Jan 05 23:29:20 crc kubenswrapper[4910]: I0105 23:29:20.382479 4910 generic.go:334] "Generic (PLEG): container finished" podID="0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f" containerID="fb984cee4982f54b303e7582ed586d6a948cfe4da81d6f22e3a8e049b5dd583b" exitCode=0 Jan 05 23:29:20 crc kubenswrapper[4910]: I0105 23:29:20.382598 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-fdhs8" event={"ID":"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f","Type":"ContainerDied","Data":"fb984cee4982f54b303e7582ed586d6a948cfe4da81d6f22e3a8e049b5dd583b"} Jan 05 23:29:20 crc kubenswrapper[4910]: I0105 23:29:20.922469 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-s2ngj" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.009579 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.018864 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.133503 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-config-data\") pod \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.133560 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-config-data-merged\") pod \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.133582 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gp22h\" (UniqueName: \"kubernetes.io/projected/3366d11e-656d-47cb-8e7f-327bb77f5ec9-kube-api-access-gp22h\") pod \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.133614 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-scripts\") pod \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.133694 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-run-ovn\") pod \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.133709 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-run\") pod \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.133764 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3366d11e-656d-47cb-8e7f-327bb77f5ec9-additional-scripts\") pod \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.133833 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "3366d11e-656d-47cb-8e7f-327bb77f5ec9" (UID: "3366d11e-656d-47cb-8e7f-327bb77f5ec9"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.133858 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-combined-ca-bundle\") pod \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\" (UID: \"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f\") " Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.133895 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3366d11e-656d-47cb-8e7f-327bb77f5ec9-scripts\") pod \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.133935 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-log-ovn\") pod \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\" (UID: \"3366d11e-656d-47cb-8e7f-327bb77f5ec9\") " Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.134063 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-run" (OuterVolumeSpecName: "var-run") pod "3366d11e-656d-47cb-8e7f-327bb77f5ec9" (UID: "3366d11e-656d-47cb-8e7f-327bb77f5ec9"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.134349 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "3366d11e-656d-47cb-8e7f-327bb77f5ec9" (UID: "3366d11e-656d-47cb-8e7f-327bb77f5ec9"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.134711 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3366d11e-656d-47cb-8e7f-327bb77f5ec9-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "3366d11e-656d-47cb-8e7f-327bb77f5ec9" (UID: "3366d11e-656d-47cb-8e7f-327bb77f5ec9"). InnerVolumeSpecName "additional-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.134921 4910 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3366d11e-656d-47cb-8e7f-327bb77f5ec9-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.134937 4910 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.134947 4910 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.134955 4910 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3366d11e-656d-47cb-8e7f-327bb77f5ec9-var-run\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.134938 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3366d11e-656d-47cb-8e7f-327bb77f5ec9-scripts" (OuterVolumeSpecName: "scripts") pod "3366d11e-656d-47cb-8e7f-327bb77f5ec9" (UID: "3366d11e-656d-47cb-8e7f-327bb77f5ec9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.140258 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-config-data" (OuterVolumeSpecName: "config-data") pod "0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f" (UID: "0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.140307 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3366d11e-656d-47cb-8e7f-327bb77f5ec9-kube-api-access-gp22h" (OuterVolumeSpecName: "kube-api-access-gp22h") pod "3366d11e-656d-47cb-8e7f-327bb77f5ec9" (UID: "3366d11e-656d-47cb-8e7f-327bb77f5ec9"). InnerVolumeSpecName "kube-api-access-gp22h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.155295 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-scripts" (OuterVolumeSpecName: "scripts") pod "0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f" (UID: "0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.170671 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f" (UID: "0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f"). InnerVolumeSpecName "config-data-merged". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.172944 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f" (UID: "0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.236894 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.236929 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-config-data-merged\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.236942 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gp22h\" (UniqueName: \"kubernetes.io/projected/3366d11e-656d-47cb-8e7f-327bb77f5ec9-kube-api-access-gp22h\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.236952 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.236960 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.236968 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3366d11e-656d-47cb-8e7f-327bb77f5ec9-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.430791 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s2ngj-config-gk7v4" event={"ID":"3366d11e-656d-47cb-8e7f-327bb77f5ec9","Type":"ContainerDied","Data":"a0e6f5d78b232dd40a60a3e2d9851249000097b4803566d7e0bf58d1f24941df"} Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.430823 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s2ngj-config-gk7v4" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.430836 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0e6f5d78b232dd40a60a3e2d9851249000097b4803566d7e0bf58d1f24941df" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.434337 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-fdhs8" event={"ID":"0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f","Type":"ContainerDied","Data":"6f91b5e9d3c4debe7099afb550e515dc16bc47ec53d8441f0db89b68adbd5e91"} Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.434378 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f91b5e9d3c4debe7099afb550e515dc16bc47ec53d8441f0db89b68adbd5e91" Jan 05 23:29:25 crc kubenswrapper[4910]: I0105 23:29:25.434441 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-fdhs8" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.118099 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-s2ngj-config-gk7v4"] Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.134171 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-s2ngj-config-gk7v4"] Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.240429 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-s2ngj-config-bn2q4"] Jan 05 23:29:26 crc kubenswrapper[4910]: E0105 23:29:26.241055 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3366d11e-656d-47cb-8e7f-327bb77f5ec9" containerName="ovn-config" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.241075 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3366d11e-656d-47cb-8e7f-327bb77f5ec9" containerName="ovn-config" Jan 05 23:29:26 crc kubenswrapper[4910]: E0105 23:29:26.241108 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f" containerName="init" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.241117 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f" containerName="init" Jan 05 23:29:26 crc kubenswrapper[4910]: E0105 23:29:26.241159 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f" containerName="octavia-db-sync" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.241168 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f" containerName="octavia-db-sync" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.241401 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3366d11e-656d-47cb-8e7f-327bb77f5ec9" containerName="ovn-config" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.241421 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f" containerName="octavia-db-sync" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.242265 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.244190 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.249693 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s2ngj-config-bn2q4"] Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.392998 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/44ea1a4b-2105-4315-8368-d69b361dd810-scripts\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.393306 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tv2x\" (UniqueName: \"kubernetes.io/projected/44ea1a4b-2105-4315-8368-d69b361dd810-kube-api-access-2tv2x\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.393372 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/44ea1a4b-2105-4315-8368-d69b361dd810-additional-scripts\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.393439 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-run-ovn\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.393495 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-log-ovn\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.393523 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-run\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.461983 4910 generic.go:334] "Generic (PLEG): container finished" podID="6c6d4163-66d2-490d-9788-726d0585fecf" containerID="648e1d47b2a6028a2aaffc591c0432d3fde69a05414e0b8f0ce9c05252751276" exitCode=0 Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.462234 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-597bd57878-ctmfl" event={"ID":"6c6d4163-66d2-490d-9788-726d0585fecf","Type":"ContainerDied","Data":"648e1d47b2a6028a2aaffc591c0432d3fde69a05414e0b8f0ce9c05252751276"} Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.468859 4910 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-qkp4b" event={"ID":"b05718f5-8878-4646-beaa-0cea45fcfda9","Type":"ContainerStarted","Data":"7f95d442f36824c0048f491d08d1a66cc483ec6e8c1659384136b61921f8250b"} Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.469380 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.494900 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tv2x\" (UniqueName: \"kubernetes.io/projected/44ea1a4b-2105-4315-8368-d69b361dd810-kube-api-access-2tv2x\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.494969 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/44ea1a4b-2105-4315-8368-d69b361dd810-additional-scripts\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.495025 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-run-ovn\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.495045 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-log-ovn\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.495074 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-run\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.495178 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/44ea1a4b-2105-4315-8368-d69b361dd810-scripts\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.495665 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-run-ovn\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.496442 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/44ea1a4b-2105-4315-8368-d69b361dd810-additional-scripts\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " 
pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.496514 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-log-ovn\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.496565 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-run\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.498136 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/44ea1a4b-2105-4315-8368-d69b361dd810-scripts\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.506296 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-rsyslog-qkp4b" podStartSLOduration=2.337010788 podStartE2EDuration="15.506269972s" podCreationTimestamp="2026-01-05 23:29:11 +0000 UTC" firstStartedPulling="2026-01-05 23:29:12.304785262 +0000 UTC m=+5883.882282932" lastFinishedPulling="2026-01-05 23:29:25.474044446 +0000 UTC m=+5897.051542116" observedRunningTime="2026-01-05 23:29:26.499461784 +0000 UTC m=+5898.076959454" watchObservedRunningTime="2026-01-05 23:29:26.506269972 +0000 UTC m=+5898.083767642" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.523738 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tv2x\" (UniqueName: \"kubernetes.io/projected/44ea1a4b-2105-4315-8368-d69b361dd810-kube-api-access-2tv2x\") pod \"ovn-controller-s2ngj-config-bn2q4\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.606684 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:26 crc kubenswrapper[4910]: I0105 23:29:26.737029 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3366d11e-656d-47cb-8e7f-327bb77f5ec9" path="/var/lib/kubelet/pods/3366d11e-656d-47cb-8e7f-327bb77f5ec9/volumes" Jan 05 23:29:27 crc kubenswrapper[4910]: I0105 23:29:27.092937 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s2ngj-config-bn2q4"] Jan 05 23:29:27 crc kubenswrapper[4910]: W0105 23:29:27.094707 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44ea1a4b_2105_4315_8368_d69b361dd810.slice/crio-149c1479911778aa303ffacc8672163fb7cb346a7902f08dfa6e68762356430f WatchSource:0}: Error finding container 149c1479911778aa303ffacc8672163fb7cb346a7902f08dfa6e68762356430f: Status 404 returned error can't find the container with id 149c1479911778aa303ffacc8672163fb7cb346a7902f08dfa6e68762356430f Jan 05 23:29:27 crc kubenswrapper[4910]: I0105 23:29:27.481245 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-597bd57878-ctmfl" event={"ID":"6c6d4163-66d2-490d-9788-726d0585fecf","Type":"ContainerStarted","Data":"50a397be1cf17cb76602a2af779767f487c856691940e9db08af6c8f26d2ba95"} Jan 05 23:29:27 crc kubenswrapper[4910]: I0105 23:29:27.484538 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s2ngj-config-bn2q4" event={"ID":"44ea1a4b-2105-4315-8368-d69b361dd810","Type":"ContainerStarted","Data":"233d33d95be1fca8d90b8a8b08a404f037302322afe421ab5f7da382d3d8ae09"} Jan 05 23:29:27 crc kubenswrapper[4910]: I0105 23:29:27.484874 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s2ngj-config-bn2q4" event={"ID":"44ea1a4b-2105-4315-8368-d69b361dd810","Type":"ContainerStarted","Data":"149c1479911778aa303ffacc8672163fb7cb346a7902f08dfa6e68762356430f"} Jan 05 23:29:27 crc kubenswrapper[4910]: I0105 23:29:27.508798 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-597bd57878-ctmfl" podStartSLOduration=2.956891379 podStartE2EDuration="15.508776542s" podCreationTimestamp="2026-01-05 23:29:12 +0000 UTC" firstStartedPulling="2026-01-05 23:29:12.980898698 +0000 UTC m=+5884.558396368" lastFinishedPulling="2026-01-05 23:29:25.532783861 +0000 UTC m=+5897.110281531" observedRunningTime="2026-01-05 23:29:27.500688392 +0000 UTC m=+5899.078186072" watchObservedRunningTime="2026-01-05 23:29:27.508776542 +0000 UTC m=+5899.086274212" Jan 05 23:29:27 crc kubenswrapper[4910]: I0105 23:29:27.537344 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-s2ngj-config-bn2q4" podStartSLOduration=1.53732258 podStartE2EDuration="1.53732258s" podCreationTimestamp="2026-01-05 23:29:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:29:27.532751656 +0000 UTC m=+5899.110249326" watchObservedRunningTime="2026-01-05 23:29:27.53732258 +0000 UTC m=+5899.114820250" Jan 05 23:29:28 crc kubenswrapper[4910]: I0105 23:29:28.512916 4910 generic.go:334] "Generic (PLEG): container finished" podID="44ea1a4b-2105-4315-8368-d69b361dd810" containerID="233d33d95be1fca8d90b8a8b08a404f037302322afe421ab5f7da382d3d8ae09" exitCode=0 Jan 05 23:29:28 crc kubenswrapper[4910]: I0105 23:29:28.513036 4910 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s2ngj-config-bn2q4" event={"ID":"44ea1a4b-2105-4315-8368-d69b361dd810","Type":"ContainerDied","Data":"233d33d95be1fca8d90b8a8b08a404f037302322afe421ab5f7da382d3d8ae09"} Jan 05 23:29:29 crc kubenswrapper[4910]: I0105 23:29:29.946352 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.085540 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-run\") pod \"44ea1a4b-2105-4315-8368-d69b361dd810\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.085668 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-run" (OuterVolumeSpecName: "var-run") pod "44ea1a4b-2105-4315-8368-d69b361dd810" (UID: "44ea1a4b-2105-4315-8368-d69b361dd810"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.085683 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-log-ovn\") pod \"44ea1a4b-2105-4315-8368-d69b361dd810\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.085775 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tv2x\" (UniqueName: \"kubernetes.io/projected/44ea1a4b-2105-4315-8368-d69b361dd810-kube-api-access-2tv2x\") pod \"44ea1a4b-2105-4315-8368-d69b361dd810\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.085774 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "44ea1a4b-2105-4315-8368-d69b361dd810" (UID: "44ea1a4b-2105-4315-8368-d69b361dd810"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.085845 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/44ea1a4b-2105-4315-8368-d69b361dd810-additional-scripts\") pod \"44ea1a4b-2105-4315-8368-d69b361dd810\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.085929 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/44ea1a4b-2105-4315-8368-d69b361dd810-scripts\") pod \"44ea1a4b-2105-4315-8368-d69b361dd810\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.085966 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-run-ovn\") pod \"44ea1a4b-2105-4315-8368-d69b361dd810\" (UID: \"44ea1a4b-2105-4315-8368-d69b361dd810\") " Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.086097 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "44ea1a4b-2105-4315-8368-d69b361dd810" (UID: "44ea1a4b-2105-4315-8368-d69b361dd810"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.086505 4910 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.086533 4910 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-run\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.086544 4910 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/44ea1a4b-2105-4315-8368-d69b361dd810-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.086847 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44ea1a4b-2105-4315-8368-d69b361dd810-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "44ea1a4b-2105-4315-8368-d69b361dd810" (UID: "44ea1a4b-2105-4315-8368-d69b361dd810"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.086997 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44ea1a4b-2105-4315-8368-d69b361dd810-scripts" (OuterVolumeSpecName: "scripts") pod "44ea1a4b-2105-4315-8368-d69b361dd810" (UID: "44ea1a4b-2105-4315-8368-d69b361dd810"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.103451 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44ea1a4b-2105-4315-8368-d69b361dd810-kube-api-access-2tv2x" (OuterVolumeSpecName: "kube-api-access-2tv2x") pod "44ea1a4b-2105-4315-8368-d69b361dd810" (UID: "44ea1a4b-2105-4315-8368-d69b361dd810"). 
InnerVolumeSpecName "kube-api-access-2tv2x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.167685 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-s2ngj-config-bn2q4"] Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.190261 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2tv2x\" (UniqueName: \"kubernetes.io/projected/44ea1a4b-2105-4315-8368-d69b361dd810-kube-api-access-2tv2x\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.190331 4910 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/44ea1a4b-2105-4315-8368-d69b361dd810-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.190351 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/44ea1a4b-2105-4315-8368-d69b361dd810-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.196104 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-s2ngj-config-bn2q4"] Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.540980 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="149c1479911778aa303ffacc8672163fb7cb346a7902f08dfa6e68762356430f" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.541067 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s2ngj-config-bn2q4" Jan 05 23:29:30 crc kubenswrapper[4910]: I0105 23:29:30.741942 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44ea1a4b-2105-4315-8368-d69b361dd810" path="/var/lib/kubelet/pods/44ea1a4b-2105-4315-8368-d69b361dd810/volumes" Jan 05 23:29:41 crc kubenswrapper[4910]: I0105 23:29:41.779654 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-rsyslog-qkp4b" Jan 05 23:29:51 crc kubenswrapper[4910]: I0105 23:29:51.430526 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-597bd57878-ctmfl"] Jan 05 23:29:51 crc kubenswrapper[4910]: I0105 23:29:51.431257 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-image-upload-597bd57878-ctmfl" podUID="6c6d4163-66d2-490d-9788-726d0585fecf" containerName="octavia-amphora-httpd" containerID="cri-o://50a397be1cf17cb76602a2af779767f487c856691940e9db08af6c8f26d2ba95" gracePeriod=30 Jan 05 23:29:51 crc kubenswrapper[4910]: I0105 23:29:51.783512 4910 generic.go:334] "Generic (PLEG): container finished" podID="6c6d4163-66d2-490d-9788-726d0585fecf" containerID="50a397be1cf17cb76602a2af779767f487c856691940e9db08af6c8f26d2ba95" exitCode=0 Jan 05 23:29:51 crc kubenswrapper[4910]: I0105 23:29:51.783587 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-597bd57878-ctmfl" event={"ID":"6c6d4163-66d2-490d-9788-726d0585fecf","Type":"ContainerDied","Data":"50a397be1cf17cb76602a2af779767f487c856691940e9db08af6c8f26d2ba95"} Jan 05 23:29:52 crc kubenswrapper[4910]: I0105 23:29:52.051492 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-597bd57878-ctmfl" Jan 05 23:29:52 crc kubenswrapper[4910]: I0105 23:29:52.247515 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6c6d4163-66d2-490d-9788-726d0585fecf-httpd-config\") pod \"6c6d4163-66d2-490d-9788-726d0585fecf\" (UID: \"6c6d4163-66d2-490d-9788-726d0585fecf\") " Jan 05 23:29:52 crc kubenswrapper[4910]: I0105 23:29:52.247603 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/6c6d4163-66d2-490d-9788-726d0585fecf-amphora-image\") pod \"6c6d4163-66d2-490d-9788-726d0585fecf\" (UID: \"6c6d4163-66d2-490d-9788-726d0585fecf\") " Jan 05 23:29:52 crc kubenswrapper[4910]: I0105 23:29:52.289564 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c6d4163-66d2-490d-9788-726d0585fecf-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "6c6d4163-66d2-490d-9788-726d0585fecf" (UID: "6c6d4163-66d2-490d-9788-726d0585fecf"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:29:52 crc kubenswrapper[4910]: I0105 23:29:52.316495 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c6d4163-66d2-490d-9788-726d0585fecf-amphora-image" (OuterVolumeSpecName: "amphora-image") pod "6c6d4163-66d2-490d-9788-726d0585fecf" (UID: "6c6d4163-66d2-490d-9788-726d0585fecf"). InnerVolumeSpecName "amphora-image". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:29:52 crc kubenswrapper[4910]: I0105 23:29:52.352575 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6c6d4163-66d2-490d-9788-726d0585fecf-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:52 crc kubenswrapper[4910]: I0105 23:29:52.352618 4910 reconciler_common.go:293] "Volume detached for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/6c6d4163-66d2-490d-9788-726d0585fecf-amphora-image\") on node \"crc\" DevicePath \"\"" Jan 05 23:29:52 crc kubenswrapper[4910]: I0105 23:29:52.801309 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-597bd57878-ctmfl" event={"ID":"6c6d4163-66d2-490d-9788-726d0585fecf","Type":"ContainerDied","Data":"a1a53c03a6096d19f7c6dab06567385486989f0ec8484533ebd77d1c1fd1e1f6"} Jan 05 23:29:52 crc kubenswrapper[4910]: I0105 23:29:52.801368 4910 scope.go:117] "RemoveContainer" containerID="50a397be1cf17cb76602a2af779767f487c856691940e9db08af6c8f26d2ba95" Jan 05 23:29:52 crc kubenswrapper[4910]: I0105 23:29:52.801422 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-597bd57878-ctmfl" Jan 05 23:29:52 crc kubenswrapper[4910]: I0105 23:29:52.837248 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-597bd57878-ctmfl"] Jan 05 23:29:52 crc kubenswrapper[4910]: I0105 23:29:52.844949 4910 scope.go:117] "RemoveContainer" containerID="648e1d47b2a6028a2aaffc591c0432d3fde69a05414e0b8f0ce9c05252751276" Jan 05 23:29:52 crc kubenswrapper[4910]: I0105 23:29:52.851883 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-image-upload-597bd57878-ctmfl"] Jan 05 23:29:54 crc kubenswrapper[4910]: I0105 23:29:54.742505 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c6d4163-66d2-490d-9788-726d0585fecf" path="/var/lib/kubelet/pods/6c6d4163-66d2-490d-9788-726d0585fecf/volumes" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.170310 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6"] Jan 05 23:30:00 crc kubenswrapper[4910]: E0105 23:30:00.171562 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c6d4163-66d2-490d-9788-726d0585fecf" containerName="octavia-amphora-httpd" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.171586 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c6d4163-66d2-490d-9788-726d0585fecf" containerName="octavia-amphora-httpd" Jan 05 23:30:00 crc kubenswrapper[4910]: E0105 23:30:00.171620 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c6d4163-66d2-490d-9788-726d0585fecf" containerName="init" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.171633 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c6d4163-66d2-490d-9788-726d0585fecf" containerName="init" Jan 05 23:30:00 crc kubenswrapper[4910]: E0105 23:30:00.171693 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44ea1a4b-2105-4315-8368-d69b361dd810" containerName="ovn-config" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.171710 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="44ea1a4b-2105-4315-8368-d69b361dd810" containerName="ovn-config" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.172008 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c6d4163-66d2-490d-9788-726d0585fecf" containerName="octavia-amphora-httpd" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.172050 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="44ea1a4b-2105-4315-8368-d69b361dd810" containerName="ovn-config" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.173378 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.179235 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.179295 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.183655 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6"] Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.229906 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/280ce148-2a68-45eb-b6e4-f3945be8eae2-secret-volume\") pod \"collect-profiles-29460930-nx8x6\" (UID: \"280ce148-2a68-45eb-b6e4-f3945be8eae2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.230371 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/280ce148-2a68-45eb-b6e4-f3945be8eae2-config-volume\") pod \"collect-profiles-29460930-nx8x6\" (UID: \"280ce148-2a68-45eb-b6e4-f3945be8eae2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.230511 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkrtq\" (UniqueName: \"kubernetes.io/projected/280ce148-2a68-45eb-b6e4-f3945be8eae2-kube-api-access-fkrtq\") pod \"collect-profiles-29460930-nx8x6\" (UID: \"280ce148-2a68-45eb-b6e4-f3945be8eae2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.333328 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/280ce148-2a68-45eb-b6e4-f3945be8eae2-secret-volume\") pod \"collect-profiles-29460930-nx8x6\" (UID: \"280ce148-2a68-45eb-b6e4-f3945be8eae2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.333466 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/280ce148-2a68-45eb-b6e4-f3945be8eae2-config-volume\") pod \"collect-profiles-29460930-nx8x6\" (UID: \"280ce148-2a68-45eb-b6e4-f3945be8eae2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.333525 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkrtq\" (UniqueName: \"kubernetes.io/projected/280ce148-2a68-45eb-b6e4-f3945be8eae2-kube-api-access-fkrtq\") pod \"collect-profiles-29460930-nx8x6\" (UID: \"280ce148-2a68-45eb-b6e4-f3945be8eae2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.334751 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/280ce148-2a68-45eb-b6e4-f3945be8eae2-config-volume\") pod 
\"collect-profiles-29460930-nx8x6\" (UID: \"280ce148-2a68-45eb-b6e4-f3945be8eae2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.341535 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/280ce148-2a68-45eb-b6e4-f3945be8eae2-secret-volume\") pod \"collect-profiles-29460930-nx8x6\" (UID: \"280ce148-2a68-45eb-b6e4-f3945be8eae2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.357265 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkrtq\" (UniqueName: \"kubernetes.io/projected/280ce148-2a68-45eb-b6e4-f3945be8eae2-kube-api-access-fkrtq\") pod \"collect-profiles-29460930-nx8x6\" (UID: \"280ce148-2a68-45eb-b6e4-f3945be8eae2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.512727 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" Jan 05 23:30:00 crc kubenswrapper[4910]: I0105 23:30:00.999334 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6"] Jan 05 23:30:01 crc kubenswrapper[4910]: I0105 23:30:01.906228 4910 generic.go:334] "Generic (PLEG): container finished" podID="280ce148-2a68-45eb-b6e4-f3945be8eae2" containerID="d9fd3bacad6f65214012dd5ce8b25b5912f28296bd9d139ec8c8007ba6268201" exitCode=0 Jan 05 23:30:01 crc kubenswrapper[4910]: I0105 23:30:01.906293 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" event={"ID":"280ce148-2a68-45eb-b6e4-f3945be8eae2","Type":"ContainerDied","Data":"d9fd3bacad6f65214012dd5ce8b25b5912f28296bd9d139ec8c8007ba6268201"} Jan 05 23:30:01 crc kubenswrapper[4910]: I0105 23:30:01.906628 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" event={"ID":"280ce148-2a68-45eb-b6e4-f3945be8eae2","Type":"ContainerStarted","Data":"0dc2c43ecc4d27e5cc069fc74eaf7d118349b4c3024dddad096c7cb6105f5433"} Jan 05 23:30:03 crc kubenswrapper[4910]: I0105 23:30:03.371889 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" Jan 05 23:30:03 crc kubenswrapper[4910]: I0105 23:30:03.402874 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/280ce148-2a68-45eb-b6e4-f3945be8eae2-secret-volume\") pod \"280ce148-2a68-45eb-b6e4-f3945be8eae2\" (UID: \"280ce148-2a68-45eb-b6e4-f3945be8eae2\") " Jan 05 23:30:03 crc kubenswrapper[4910]: I0105 23:30:03.403076 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/280ce148-2a68-45eb-b6e4-f3945be8eae2-config-volume\") pod \"280ce148-2a68-45eb-b6e4-f3945be8eae2\" (UID: \"280ce148-2a68-45eb-b6e4-f3945be8eae2\") " Jan 05 23:30:03 crc kubenswrapper[4910]: I0105 23:30:03.403302 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fkrtq\" (UniqueName: \"kubernetes.io/projected/280ce148-2a68-45eb-b6e4-f3945be8eae2-kube-api-access-fkrtq\") pod \"280ce148-2a68-45eb-b6e4-f3945be8eae2\" (UID: \"280ce148-2a68-45eb-b6e4-f3945be8eae2\") " Jan 05 23:30:03 crc kubenswrapper[4910]: I0105 23:30:03.404557 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/280ce148-2a68-45eb-b6e4-f3945be8eae2-config-volume" (OuterVolumeSpecName: "config-volume") pod "280ce148-2a68-45eb-b6e4-f3945be8eae2" (UID: "280ce148-2a68-45eb-b6e4-f3945be8eae2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:30:03 crc kubenswrapper[4910]: I0105 23:30:03.416324 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/280ce148-2a68-45eb-b6e4-f3945be8eae2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "280ce148-2a68-45eb-b6e4-f3945be8eae2" (UID: "280ce148-2a68-45eb-b6e4-f3945be8eae2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:30:03 crc kubenswrapper[4910]: I0105 23:30:03.418222 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/280ce148-2a68-45eb-b6e4-f3945be8eae2-kube-api-access-fkrtq" (OuterVolumeSpecName: "kube-api-access-fkrtq") pod "280ce148-2a68-45eb-b6e4-f3945be8eae2" (UID: "280ce148-2a68-45eb-b6e4-f3945be8eae2"). InnerVolumeSpecName "kube-api-access-fkrtq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:30:03 crc kubenswrapper[4910]: I0105 23:30:03.505650 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fkrtq\" (UniqueName: \"kubernetes.io/projected/280ce148-2a68-45eb-b6e4-f3945be8eae2-kube-api-access-fkrtq\") on node \"crc\" DevicePath \"\"" Jan 05 23:30:03 crc kubenswrapper[4910]: I0105 23:30:03.505686 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/280ce148-2a68-45eb-b6e4-f3945be8eae2-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 05 23:30:03 crc kubenswrapper[4910]: I0105 23:30:03.505699 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/280ce148-2a68-45eb-b6e4-f3945be8eae2-config-volume\") on node \"crc\" DevicePath \"\"" Jan 05 23:30:03 crc kubenswrapper[4910]: I0105 23:30:03.941674 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" event={"ID":"280ce148-2a68-45eb-b6e4-f3945be8eae2","Type":"ContainerDied","Data":"0dc2c43ecc4d27e5cc069fc74eaf7d118349b4c3024dddad096c7cb6105f5433"} Jan 05 23:30:03 crc kubenswrapper[4910]: I0105 23:30:03.941730 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0dc2c43ecc4d27e5cc069fc74eaf7d118349b4c3024dddad096c7cb6105f5433" Jan 05 23:30:03 crc kubenswrapper[4910]: I0105 23:30:03.941827 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460930-nx8x6" Jan 05 23:30:04 crc kubenswrapper[4910]: I0105 23:30:04.453524 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s"] Jan 05 23:30:04 crc kubenswrapper[4910]: I0105 23:30:04.463576 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460885-zfm2s"] Jan 05 23:30:04 crc kubenswrapper[4910]: I0105 23:30:04.740239 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5da3b211-d6bb-4b8b-8918-21d631902c74" path="/var/lib/kubelet/pods/5da3b211-d6bb-4b8b-8918-21d631902c74/volumes" Jan 05 23:30:10 crc kubenswrapper[4910]: I0105 23:30:10.953039 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:30:10 crc kubenswrapper[4910]: I0105 23:30:10.954314 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.124028 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-healthmanager-vd7kz"] Jan 05 23:30:16 crc kubenswrapper[4910]: E0105 23:30:16.124976 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="280ce148-2a68-45eb-b6e4-f3945be8eae2" containerName="collect-profiles" Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.124989 4910 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="280ce148-2a68-45eb-b6e4-f3945be8eae2" containerName="collect-profiles" Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.125731 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="280ce148-2a68-45eb-b6e4-f3945be8eae2" containerName="collect-profiles" Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.126988 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-healthmanager-vd7kz" Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.132213 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-config-data" Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.132470 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-certs-secret" Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.132575 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-scripts" Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.138585 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-vd7kz"] Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.238928 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1262f577-e64a-4760-818a-961ba274fc65-config-data\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz" Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.239019 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1262f577-e64a-4760-818a-961ba274fc65-scripts\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz" Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.239052 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/1262f577-e64a-4760-818a-961ba274fc65-config-data-merged\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz" Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.239174 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/1262f577-e64a-4760-818a-961ba274fc65-hm-ports\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz" Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.239215 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1262f577-e64a-4760-818a-961ba274fc65-combined-ca-bundle\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz" Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.239269 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/1262f577-e64a-4760-818a-961ba274fc65-amphora-certs\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz" 
Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.340689 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1262f577-e64a-4760-818a-961ba274fc65-scripts\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.340744 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/1262f577-e64a-4760-818a-961ba274fc65-config-data-merged\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.340829 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/1262f577-e64a-4760-818a-961ba274fc65-hm-ports\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.340870 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1262f577-e64a-4760-818a-961ba274fc65-combined-ca-bundle\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.340899 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/1262f577-e64a-4760-818a-961ba274fc65-amphora-certs\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.340938 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1262f577-e64a-4760-818a-961ba274fc65-config-data\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.342913 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/1262f577-e64a-4760-818a-961ba274fc65-config-data-merged\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.343042 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/1262f577-e64a-4760-818a-961ba274fc65-hm-ports\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.349369 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1262f577-e64a-4760-818a-961ba274fc65-combined-ca-bundle\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.349392 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1262f577-e64a-4760-818a-961ba274fc65-config-data\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.350343 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/1262f577-e64a-4760-818a-961ba274fc65-amphora-certs\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.365288 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1262f577-e64a-4760-818a-961ba274fc65-scripts\") pod \"octavia-healthmanager-vd7kz\" (UID: \"1262f577-e64a-4760-818a-961ba274fc65\") " pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:16 crc kubenswrapper[4910]: I0105 23:30:16.459323 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.104693 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-vd7kz"]
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.119485 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-vd7kz" event={"ID":"1262f577-e64a-4760-818a-961ba274fc65","Type":"ContainerStarted","Data":"32657ab73111f8fe9b674a13b1b8aeacdcabeb22595b28f1ea7aa481f11a6865"}
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.642328 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-housekeeping-tm48g"]
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.644778 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.655436 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-tm48g"]
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.655939 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-scripts"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.656207 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-config-data"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.798166 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/96c79f1a-fd70-4983-8ae6-e879e87c702d-hm-ports\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.798753 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96c79f1a-fd70-4983-8ae6-e879e87c702d-config-data\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.798900 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96c79f1a-fd70-4983-8ae6-e879e87c702d-scripts\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.798964 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/96c79f1a-fd70-4983-8ae6-e879e87c702d-amphora-certs\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.799091 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96c79f1a-fd70-4983-8ae6-e879e87c702d-combined-ca-bundle\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.799141 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/96c79f1a-fd70-4983-8ae6-e879e87c702d-config-data-merged\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.901420 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96c79f1a-fd70-4983-8ae6-e879e87c702d-config-data\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.901505 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96c79f1a-fd70-4983-8ae6-e879e87c702d-scripts\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.901548 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/96c79f1a-fd70-4983-8ae6-e879e87c702d-amphora-certs\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.901597 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96c79f1a-fd70-4983-8ae6-e879e87c702d-combined-ca-bundle\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.901623 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/96c79f1a-fd70-4983-8ae6-e879e87c702d-config-data-merged\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.901696 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/96c79f1a-fd70-4983-8ae6-e879e87c702d-hm-ports\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.902569 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/96c79f1a-fd70-4983-8ae6-e879e87c702d-config-data-merged\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.903428 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/96c79f1a-fd70-4983-8ae6-e879e87c702d-hm-ports\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.909797 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/96c79f1a-fd70-4983-8ae6-e879e87c702d-amphora-certs\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.910659 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96c79f1a-fd70-4983-8ae6-e879e87c702d-scripts\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.913023 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96c79f1a-fd70-4983-8ae6-e879e87c702d-combined-ca-bundle\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:17 crc kubenswrapper[4910]: I0105 23:30:17.923168 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96c79f1a-fd70-4983-8ae6-e879e87c702d-config-data\") pod \"octavia-housekeeping-tm48g\" (UID: \"96c79f1a-fd70-4983-8ae6-e879e87c702d\") " pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:18 crc kubenswrapper[4910]: I0105 23:30:18.011251 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:18 crc kubenswrapper[4910]: I0105 23:30:18.130905 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-vd7kz" event={"ID":"1262f577-e64a-4760-818a-961ba274fc65","Type":"ContainerStarted","Data":"06758f0cf830243835a06da934896acdb87546e65f1cc21250f8a5b7f2dc593f"}
Jan 05 23:30:18 crc kubenswrapper[4910]: W0105 23:30:18.572539 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod96c79f1a_fd70_4983_8ae6_e879e87c702d.slice/crio-6e67283a195a7c783239639f237a7df09708a0d7a47d65e853cb863f9a936072 WatchSource:0}: Error finding container 6e67283a195a7c783239639f237a7df09708a0d7a47d65e853cb863f9a936072: Status 404 returned error can't find the container with id 6e67283a195a7c783239639f237a7df09708a0d7a47d65e853cb863f9a936072
Jan 05 23:30:18 crc kubenswrapper[4910]: I0105 23:30:18.573833 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-tm48g"]
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.014561 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-worker-r2r2n"]
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.017505 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.022918 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-config-data"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.022979 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-scripts"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.039644 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-r2r2n"]
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.128806 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-hm-ports\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.129082 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-config-data-merged\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.129347 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-scripts\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.129421 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-amphora-certs\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.129495 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-config-data\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.129546 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-combined-ca-bundle\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.142116 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-tm48g" event={"ID":"96c79f1a-fd70-4983-8ae6-e879e87c702d","Type":"ContainerStarted","Data":"6e67283a195a7c783239639f237a7df09708a0d7a47d65e853cb863f9a936072"}
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.232071 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-config-data-merged\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.232572 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-scripts\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.232668 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-amphora-certs\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.232745 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-config-data\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.232804 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-combined-ca-bundle\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.232818 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-config-data-merged\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.233069 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-hm-ports\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.240600 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-amphora-certs\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.241920 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-combined-ca-bundle\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.242362 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-hm-ports\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.243068 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-config-data\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.245412 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f3b2391-d3d6-4a8a-92ca-b10efda049f1-scripts\") pod \"octavia-worker-r2r2n\" (UID: \"8f3b2391-d3d6-4a8a-92ca-b10efda049f1\") " pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.379729 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:19 crc kubenswrapper[4910]: I0105 23:30:19.866468 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-r2r2n"]
Jan 05 23:30:20 crc kubenswrapper[4910]: I0105 23:30:20.171260 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-r2r2n" event={"ID":"8f3b2391-d3d6-4a8a-92ca-b10efda049f1","Type":"ContainerStarted","Data":"726f08eebdfd9101c2a48a8dc1654fb758d6fc93f05c7167c5cee6694a9efbb5"}
Jan 05 23:30:20 crc kubenswrapper[4910]: I0105 23:30:20.179572 4910 generic.go:334] "Generic (PLEG): container finished" podID="1262f577-e64a-4760-818a-961ba274fc65" containerID="06758f0cf830243835a06da934896acdb87546e65f1cc21250f8a5b7f2dc593f" exitCode=0
Jan 05 23:30:20 crc kubenswrapper[4910]: I0105 23:30:20.179766 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-vd7kz" event={"ID":"1262f577-e64a-4760-818a-961ba274fc65","Type":"ContainerDied","Data":"06758f0cf830243835a06da934896acdb87546e65f1cc21250f8a5b7f2dc593f"}
Jan 05 23:30:21 crc kubenswrapper[4910]: I0105 23:30:21.193313 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-tm48g" event={"ID":"96c79f1a-fd70-4983-8ae6-e879e87c702d","Type":"ContainerStarted","Data":"dbaef9379aecfa6bfa1d6a4ddc8afb3be8e014d3b05fb1d57e3bb1f8ff52f326"}
Jan 05 23:30:21 crc kubenswrapper[4910]: I0105 23:30:21.196858 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-vd7kz" event={"ID":"1262f577-e64a-4760-818a-961ba274fc65","Type":"ContainerStarted","Data":"7a119ffccce6cc7429e65e1600c9be8c46ea61f8a8ae01a03d94d4a19f1940f6"}
Jan 05 23:30:21 crc kubenswrapper[4910]: I0105 23:30:21.197291 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:21 crc kubenswrapper[4910]: I0105 23:30:21.250831 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-healthmanager-vd7kz" podStartSLOduration=5.250803867 podStartE2EDuration="5.250803867s" podCreationTimestamp="2026-01-05 23:30:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:30:21.233204411 +0000 UTC m=+5952.810702081" watchObservedRunningTime="2026-01-05 23:30:21.250803867 +0000 UTC m=+5952.828301577"
Jan 05 23:30:22 crc kubenswrapper[4910]: I0105 23:30:22.207361 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-r2r2n" event={"ID":"8f3b2391-d3d6-4a8a-92ca-b10efda049f1","Type":"ContainerStarted","Data":"965f522ea51e2e6ed270237d5d2263a734fec4f5b63407c8727eab1c4884dc19"}
Jan 05 23:30:22 crc kubenswrapper[4910]: I0105 23:30:22.210760 4910 generic.go:334] "Generic (PLEG): container finished" podID="96c79f1a-fd70-4983-8ae6-e879e87c702d" containerID="dbaef9379aecfa6bfa1d6a4ddc8afb3be8e014d3b05fb1d57e3bb1f8ff52f326" exitCode=0
Jan 05 23:30:22 crc kubenswrapper[4910]: I0105 23:30:22.211275 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-tm48g" event={"ID":"96c79f1a-fd70-4983-8ae6-e879e87c702d","Type":"ContainerDied","Data":"dbaef9379aecfa6bfa1d6a4ddc8afb3be8e014d3b05fb1d57e3bb1f8ff52f326"}
Jan 05 23:30:23 crc kubenswrapper[4910]: I0105 23:30:23.220909 4910 generic.go:334] "Generic (PLEG): container finished" podID="8f3b2391-d3d6-4a8a-92ca-b10efda049f1" containerID="965f522ea51e2e6ed270237d5d2263a734fec4f5b63407c8727eab1c4884dc19" exitCode=0
Jan 05 23:30:23 crc kubenswrapper[4910]: I0105 23:30:23.221450 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-r2r2n" event={"ID":"8f3b2391-d3d6-4a8a-92ca-b10efda049f1","Type":"ContainerDied","Data":"965f522ea51e2e6ed270237d5d2263a734fec4f5b63407c8727eab1c4884dc19"}
Jan 05 23:30:23 crc kubenswrapper[4910]: I0105 23:30:23.237152 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-tm48g" event={"ID":"96c79f1a-fd70-4983-8ae6-e879e87c702d","Type":"ContainerStarted","Data":"eed6ede323e93f7c77d3e8fb65913af150bccef7b0d8aaaebd9a16dc0dd70059"}
Jan 05 23:30:23 crc kubenswrapper[4910]: I0105 23:30:23.237410 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:23 crc kubenswrapper[4910]: I0105 23:30:23.264662 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-housekeeping-tm48g" podStartSLOduration=4.431519342 podStartE2EDuration="6.264639145s" podCreationTimestamp="2026-01-05 23:30:17 +0000 UTC" firstStartedPulling="2026-01-05 23:30:18.575640888 +0000 UTC m=+5950.153138588" lastFinishedPulling="2026-01-05 23:30:20.408760721 +0000 UTC m=+5951.986258391" observedRunningTime="2026-01-05 23:30:23.260602275 +0000 UTC m=+5954.838100035" watchObservedRunningTime="2026-01-05 23:30:23.264639145 +0000 UTC m=+5954.842136825"
Jan 05 23:30:24 crc kubenswrapper[4910]: I0105 23:30:24.249619 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-r2r2n" event={"ID":"8f3b2391-d3d6-4a8a-92ca-b10efda049f1","Type":"ContainerStarted","Data":"757045ca36d7ebf346b319852e4806aa0dad6d95af094985ef1a956b60e3737e"}
Jan 05 23:30:24 crc kubenswrapper[4910]: I0105 23:30:24.250161 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:24 crc kubenswrapper[4910]: I0105 23:30:24.271537 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-worker-r2r2n" podStartSLOduration=4.850510057 podStartE2EDuration="6.271517853s" podCreationTimestamp="2026-01-05 23:30:18 +0000 UTC" firstStartedPulling="2026-01-05 23:30:19.876428926 +0000 UTC m=+5951.453926596" lastFinishedPulling="2026-01-05 23:30:21.297436722 +0000 UTC m=+5952.874934392" observedRunningTime="2026-01-05 23:30:24.265209967 +0000 UTC m=+5955.842707637" watchObservedRunningTime="2026-01-05 23:30:24.271517853 +0000 UTC m=+5955.849015513"
Jan 05 23:30:31 crc kubenswrapper[4910]: I0105 23:30:31.519721 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-healthmanager-vd7kz"
Jan 05 23:30:33 crc kubenswrapper[4910]: I0105 23:30:33.051736 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-housekeeping-tm48g"
Jan 05 23:30:34 crc kubenswrapper[4910]: I0105 23:30:34.415289 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-worker-r2r2n"
Jan 05 23:30:40 crc kubenswrapper[4910]: I0105 23:30:40.952511 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 05 23:30:40 crc kubenswrapper[4910]: I0105 23:30:40.953037 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 05 23:30:44 crc kubenswrapper[4910]: I0105 23:30:44.222990 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hkttx"]
Jan 05 23:30:44 crc kubenswrapper[4910]: I0105 23:30:44.229271 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:44 crc kubenswrapper[4910]: I0105 23:30:44.232855 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hkttx"]
Jan 05 23:30:44 crc kubenswrapper[4910]: I0105 23:30:44.362273 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsjsx\" (UniqueName: \"kubernetes.io/projected/08a8538f-8a68-4526-bf6c-4af7abfe43d0-kube-api-access-rsjsx\") pod \"certified-operators-hkttx\" (UID: \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\") " pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:44 crc kubenswrapper[4910]: I0105 23:30:44.362347 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a8538f-8a68-4526-bf6c-4af7abfe43d0-catalog-content\") pod \"certified-operators-hkttx\" (UID: \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\") " pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:44 crc kubenswrapper[4910]: I0105 23:30:44.362513 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a8538f-8a68-4526-bf6c-4af7abfe43d0-utilities\") pod \"certified-operators-hkttx\" (UID: \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\") " pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:44 crc kubenswrapper[4910]: I0105 23:30:44.464573 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a8538f-8a68-4526-bf6c-4af7abfe43d0-utilities\") pod \"certified-operators-hkttx\" (UID: \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\") " pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:44 crc kubenswrapper[4910]: I0105 23:30:44.464721 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsjsx\" (UniqueName: \"kubernetes.io/projected/08a8538f-8a68-4526-bf6c-4af7abfe43d0-kube-api-access-rsjsx\") pod \"certified-operators-hkttx\" (UID: \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\") " pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:44 crc kubenswrapper[4910]: I0105 23:30:44.464760 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a8538f-8a68-4526-bf6c-4af7abfe43d0-catalog-content\") pod \"certified-operators-hkttx\" (UID: \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\") " pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:44 crc kubenswrapper[4910]: I0105 23:30:44.465393 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a8538f-8a68-4526-bf6c-4af7abfe43d0-catalog-content\") pod \"certified-operators-hkttx\" (UID: \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\") " pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:44 crc kubenswrapper[4910]: I0105 23:30:44.465694 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a8538f-8a68-4526-bf6c-4af7abfe43d0-utilities\") pod \"certified-operators-hkttx\" (UID: \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\") " pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:44 crc kubenswrapper[4910]: I0105 23:30:44.485976 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsjsx\" (UniqueName: \"kubernetes.io/projected/08a8538f-8a68-4526-bf6c-4af7abfe43d0-kube-api-access-rsjsx\") pod \"certified-operators-hkttx\" (UID: \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\") " pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:44 crc kubenswrapper[4910]: I0105 23:30:44.558239 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:45 crc kubenswrapper[4910]: I0105 23:30:45.123345 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hkttx"]
Jan 05 23:30:45 crc kubenswrapper[4910]: I0105 23:30:45.530887 4910 generic.go:334] "Generic (PLEG): container finished" podID="08a8538f-8a68-4526-bf6c-4af7abfe43d0" containerID="40b22353e93f9dc2a4168509c55b8de6c0e070f62d99ad912553d00da7b35852" exitCode=0
Jan 05 23:30:45 crc kubenswrapper[4910]: I0105 23:30:45.530969 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hkttx" event={"ID":"08a8538f-8a68-4526-bf6c-4af7abfe43d0","Type":"ContainerDied","Data":"40b22353e93f9dc2a4168509c55b8de6c0e070f62d99ad912553d00da7b35852"}
Jan 05 23:30:45 crc kubenswrapper[4910]: I0105 23:30:45.531286 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hkttx" event={"ID":"08a8538f-8a68-4526-bf6c-4af7abfe43d0","Type":"ContainerStarted","Data":"5f470e2f8817ef7fec5d2aa73138eed176b3d6aa09ace4480b64512c01a2f5dc"}
Jan 05 23:30:46 crc kubenswrapper[4910]: I0105 23:30:46.522760 4910 scope.go:117] "RemoveContainer" containerID="f7896415c95fe8b75b3bb8f093c88e6065975dc0d26cc7bb7680a45e569b8209"
Jan 05 23:30:46 crc kubenswrapper[4910]: I0105 23:30:46.541230 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hkttx" event={"ID":"08a8538f-8a68-4526-bf6c-4af7abfe43d0","Type":"ContainerStarted","Data":"cedb47a2d240af49126009a29b8c24befd69a56b8607485b30d2d03732629283"}
Jan 05 23:30:46 crc kubenswrapper[4910]: I0105 23:30:46.546164 4910 scope.go:117] "RemoveContainer" containerID="2191725ea8ecb9f8c829b71cbb233cb1f98d51ccfd1a7b66d9593a9626461a69"
Jan 05 23:30:47 crc kubenswrapper[4910]: I0105 23:30:47.551707 4910 generic.go:334] "Generic (PLEG): container finished" podID="08a8538f-8a68-4526-bf6c-4af7abfe43d0" containerID="cedb47a2d240af49126009a29b8c24befd69a56b8607485b30d2d03732629283" exitCode=0
Jan 05 23:30:47 crc kubenswrapper[4910]: I0105 23:30:47.551767 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hkttx" event={"ID":"08a8538f-8a68-4526-bf6c-4af7abfe43d0","Type":"ContainerDied","Data":"cedb47a2d240af49126009a29b8c24befd69a56b8607485b30d2d03732629283"}
Jan 05 23:30:49 crc kubenswrapper[4910]: I0105 23:30:49.604814 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hkttx" event={"ID":"08a8538f-8a68-4526-bf6c-4af7abfe43d0","Type":"ContainerStarted","Data":"01ba16019bd5da1680a49d5991359ec88240b1d5edb0ce4f9c28262e3af313a1"}
Jan 05 23:30:49 crc kubenswrapper[4910]: I0105 23:30:49.641250 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hkttx" podStartSLOduration=2.8412157860000002 podStartE2EDuration="5.641228326s" podCreationTimestamp="2026-01-05 23:30:44 +0000 UTC" firstStartedPulling="2026-01-05 23:30:45.53281952 +0000 UTC m=+5977.110317190" lastFinishedPulling="2026-01-05 23:30:48.33283205 +0000 UTC m=+5979.910329730" observedRunningTime="2026-01-05 23:30:49.638176921 +0000 UTC m=+5981.215674611" watchObservedRunningTime="2026-01-05 23:30:49.641228326 +0000 UTC m=+5981.218726016"
Jan 05 23:30:54 crc kubenswrapper[4910]: I0105 23:30:54.558811 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:54 crc kubenswrapper[4910]: I0105 23:30:54.561101 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:54 crc kubenswrapper[4910]: I0105 23:30:54.648709 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:54 crc kubenswrapper[4910]: I0105 23:30:54.712707 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:54 crc kubenswrapper[4910]: I0105 23:30:54.893619 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hkttx"]
Jan 05 23:30:56 crc kubenswrapper[4910]: I0105 23:30:56.677846 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hkttx" podUID="08a8538f-8a68-4526-bf6c-4af7abfe43d0" containerName="registry-server" containerID="cri-o://01ba16019bd5da1680a49d5991359ec88240b1d5edb0ce4f9c28262e3af313a1" gracePeriod=2
Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.265486 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hkttx"
Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.347227 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a8538f-8a68-4526-bf6c-4af7abfe43d0-catalog-content\") pod \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\" (UID: \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\") "
Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.347440 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsjsx\" (UniqueName: \"kubernetes.io/projected/08a8538f-8a68-4526-bf6c-4af7abfe43d0-kube-api-access-rsjsx\") pod \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\" (UID: \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\") "
Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.347477 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a8538f-8a68-4526-bf6c-4af7abfe43d0-utilities\") pod \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\" (UID: \"08a8538f-8a68-4526-bf6c-4af7abfe43d0\") "
Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.348885 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08a8538f-8a68-4526-bf6c-4af7abfe43d0-utilities" (OuterVolumeSpecName: "utilities") pod "08a8538f-8a68-4526-bf6c-4af7abfe43d0" (UID: "08a8538f-8a68-4526-bf6c-4af7abfe43d0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.352635 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08a8538f-8a68-4526-bf6c-4af7abfe43d0-kube-api-access-rsjsx" (OuterVolumeSpecName: "kube-api-access-rsjsx") pod "08a8538f-8a68-4526-bf6c-4af7abfe43d0" (UID: "08a8538f-8a68-4526-bf6c-4af7abfe43d0"). InnerVolumeSpecName "kube-api-access-rsjsx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.397775 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08a8538f-8a68-4526-bf6c-4af7abfe43d0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "08a8538f-8a68-4526-bf6c-4af7abfe43d0" (UID: "08a8538f-8a68-4526-bf6c-4af7abfe43d0"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.450206 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a8538f-8a68-4526-bf6c-4af7abfe43d0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.450239 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsjsx\" (UniqueName: \"kubernetes.io/projected/08a8538f-8a68-4526-bf6c-4af7abfe43d0-kube-api-access-rsjsx\") on node \"crc\" DevicePath \"\"" Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.450250 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a8538f-8a68-4526-bf6c-4af7abfe43d0-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.690665 4910 generic.go:334] "Generic (PLEG): container finished" podID="08a8538f-8a68-4526-bf6c-4af7abfe43d0" containerID="01ba16019bd5da1680a49d5991359ec88240b1d5edb0ce4f9c28262e3af313a1" exitCode=0 Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.690719 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hkttx" event={"ID":"08a8538f-8a68-4526-bf6c-4af7abfe43d0","Type":"ContainerDied","Data":"01ba16019bd5da1680a49d5991359ec88240b1d5edb0ce4f9c28262e3af313a1"} Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.690763 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hkttx" event={"ID":"08a8538f-8a68-4526-bf6c-4af7abfe43d0","Type":"ContainerDied","Data":"5f470e2f8817ef7fec5d2aa73138eed176b3d6aa09ace4480b64512c01a2f5dc"} Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.690791 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hkttx" Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.690797 4910 scope.go:117] "RemoveContainer" containerID="01ba16019bd5da1680a49d5991359ec88240b1d5edb0ce4f9c28262e3af313a1" Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.735228 4910 scope.go:117] "RemoveContainer" containerID="cedb47a2d240af49126009a29b8c24befd69a56b8607485b30d2d03732629283" Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.738198 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hkttx"] Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.753486 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hkttx"] Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.778238 4910 scope.go:117] "RemoveContainer" containerID="40b22353e93f9dc2a4168509c55b8de6c0e070f62d99ad912553d00da7b35852" Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.828372 4910 scope.go:117] "RemoveContainer" containerID="01ba16019bd5da1680a49d5991359ec88240b1d5edb0ce4f9c28262e3af313a1" Jan 05 23:30:57 crc kubenswrapper[4910]: E0105 23:30:57.828919 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01ba16019bd5da1680a49d5991359ec88240b1d5edb0ce4f9c28262e3af313a1\": container with ID starting with 01ba16019bd5da1680a49d5991359ec88240b1d5edb0ce4f9c28262e3af313a1 not found: ID does not exist" containerID="01ba16019bd5da1680a49d5991359ec88240b1d5edb0ce4f9c28262e3af313a1" Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.828984 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01ba16019bd5da1680a49d5991359ec88240b1d5edb0ce4f9c28262e3af313a1"} err="failed to get container status \"01ba16019bd5da1680a49d5991359ec88240b1d5edb0ce4f9c28262e3af313a1\": rpc error: code = NotFound desc = could not find container \"01ba16019bd5da1680a49d5991359ec88240b1d5edb0ce4f9c28262e3af313a1\": container with ID starting with 01ba16019bd5da1680a49d5991359ec88240b1d5edb0ce4f9c28262e3af313a1 not found: ID does not exist" Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.829024 4910 scope.go:117] "RemoveContainer" containerID="cedb47a2d240af49126009a29b8c24befd69a56b8607485b30d2d03732629283" Jan 05 23:30:57 crc kubenswrapper[4910]: E0105 23:30:57.829644 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cedb47a2d240af49126009a29b8c24befd69a56b8607485b30d2d03732629283\": container with ID starting with cedb47a2d240af49126009a29b8c24befd69a56b8607485b30d2d03732629283 not found: ID does not exist" containerID="cedb47a2d240af49126009a29b8c24befd69a56b8607485b30d2d03732629283" Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.829714 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cedb47a2d240af49126009a29b8c24befd69a56b8607485b30d2d03732629283"} err="failed to get container status \"cedb47a2d240af49126009a29b8c24befd69a56b8607485b30d2d03732629283\": rpc error: code = NotFound desc = could not find container \"cedb47a2d240af49126009a29b8c24befd69a56b8607485b30d2d03732629283\": container with ID starting with cedb47a2d240af49126009a29b8c24befd69a56b8607485b30d2d03732629283 not found: ID does not exist" Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.829760 4910 scope.go:117] "RemoveContainer" 
containerID="40b22353e93f9dc2a4168509c55b8de6c0e070f62d99ad912553d00da7b35852" Jan 05 23:30:57 crc kubenswrapper[4910]: E0105 23:30:57.830541 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40b22353e93f9dc2a4168509c55b8de6c0e070f62d99ad912553d00da7b35852\": container with ID starting with 40b22353e93f9dc2a4168509c55b8de6c0e070f62d99ad912553d00da7b35852 not found: ID does not exist" containerID="40b22353e93f9dc2a4168509c55b8de6c0e070f62d99ad912553d00da7b35852" Jan 05 23:30:57 crc kubenswrapper[4910]: I0105 23:30:57.830574 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40b22353e93f9dc2a4168509c55b8de6c0e070f62d99ad912553d00da7b35852"} err="failed to get container status \"40b22353e93f9dc2a4168509c55b8de6c0e070f62d99ad912553d00da7b35852\": rpc error: code = NotFound desc = could not find container \"40b22353e93f9dc2a4168509c55b8de6c0e070f62d99ad912553d00da7b35852\": container with ID starting with 40b22353e93f9dc2a4168509c55b8de6c0e070f62d99ad912553d00da7b35852 not found: ID does not exist" Jan 05 23:30:58 crc kubenswrapper[4910]: I0105 23:30:58.734110 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08a8538f-8a68-4526-bf6c-4af7abfe43d0" path="/var/lib/kubelet/pods/08a8538f-8a68-4526-bf6c-4af7abfe43d0/volumes" Jan 05 23:31:07 crc kubenswrapper[4910]: I0105 23:31:07.073318 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-6a14-account-create-update-4g5tk"] Jan 05 23:31:07 crc kubenswrapper[4910]: I0105 23:31:07.088596 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-snkkh"] Jan 05 23:31:07 crc kubenswrapper[4910]: I0105 23:31:07.100948 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-6a14-account-create-update-4g5tk"] Jan 05 23:31:07 crc kubenswrapper[4910]: I0105 23:31:07.113006 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-snkkh"] Jan 05 23:31:08 crc kubenswrapper[4910]: I0105 23:31:08.740949 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46eee413-9f34-4931-b5f7-b6af4afcaa76" path="/var/lib/kubelet/pods/46eee413-9f34-4931-b5f7-b6af4afcaa76/volumes" Jan 05 23:31:08 crc kubenswrapper[4910]: I0105 23:31:08.742904 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0e6be1b-a676-49ef-93eb-90332cbaed03" path="/var/lib/kubelet/pods/d0e6be1b-a676-49ef-93eb-90332cbaed03/volumes" Jan 05 23:31:10 crc kubenswrapper[4910]: I0105 23:31:10.952568 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:31:10 crc kubenswrapper[4910]: I0105 23:31:10.953261 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:31:10 crc kubenswrapper[4910]: I0105 23:31:10.953327 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 23:31:10 crc 
kubenswrapper[4910]: I0105 23:31:10.954447 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 23:31:10 crc kubenswrapper[4910]: I0105 23:31:10.954542 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" gracePeriod=600 Jan 05 23:31:11 crc kubenswrapper[4910]: E0105 23:31:11.089340 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:31:11 crc kubenswrapper[4910]: I0105 23:31:11.844182 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" exitCode=0 Jan 05 23:31:11 crc kubenswrapper[4910]: I0105 23:31:11.844264 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514"} Jan 05 23:31:11 crc kubenswrapper[4910]: I0105 23:31:11.844476 4910 scope.go:117] "RemoveContainer" containerID="0e240e4effc2bd679e0f96fec5bc054d5530ae8a8dd2bd9c82e2bc521473387b" Jan 05 23:31:11 crc kubenswrapper[4910]: I0105 23:31:11.845933 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:31:11 crc kubenswrapper[4910]: E0105 23:31:11.846330 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:31:13 crc kubenswrapper[4910]: I0105 23:31:13.047052 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-sfck6"] Jan 05 23:31:13 crc kubenswrapper[4910]: I0105 23:31:13.056550 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-sfck6"] Jan 05 23:31:14 crc kubenswrapper[4910]: I0105 23:31:14.730950 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc069c6b-45d0-4bca-a3e2-819cc238c46a" path="/var/lib/kubelet/pods/fc069c6b-45d0-4bca-a3e2-819cc238c46a/volumes" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.148941 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-c454dd9b5-768c2"] Jan 05 23:31:23 crc kubenswrapper[4910]: E0105 23:31:23.149838 4910 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="08a8538f-8a68-4526-bf6c-4af7abfe43d0" containerName="registry-server" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.149851 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="08a8538f-8a68-4526-bf6c-4af7abfe43d0" containerName="registry-server" Jan 05 23:31:23 crc kubenswrapper[4910]: E0105 23:31:23.149868 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08a8538f-8a68-4526-bf6c-4af7abfe43d0" containerName="extract-content" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.149874 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="08a8538f-8a68-4526-bf6c-4af7abfe43d0" containerName="extract-content" Jan 05 23:31:23 crc kubenswrapper[4910]: E0105 23:31:23.149909 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08a8538f-8a68-4526-bf6c-4af7abfe43d0" containerName="extract-utilities" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.149915 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="08a8538f-8a68-4526-bf6c-4af7abfe43d0" containerName="extract-utilities" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.150112 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="08a8538f-8a68-4526-bf6c-4af7abfe43d0" containerName="registry-server" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.151105 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.155513 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.155666 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.156170 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-94mr8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.156443 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.169839 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-c454dd9b5-768c2"] Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.207386 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.207630 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="7459c264-36f9-4ebb-a162-81373cd02f98" containerName="glance-log" containerID="cri-o://c5487ef6b746df81b6254d06d90f7b6250dd82f9749d5814dfa3283645590f5e" gracePeriod=30 Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.208028 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="7459c264-36f9-4ebb-a162-81373cd02f98" containerName="glance-httpd" containerID="cri-o://dc4dba090d7651413c055c546670049766a065a27e57e4aefcf6a51f23ebe82d" gracePeriod=30 Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.288006 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.288466 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" 
podUID="f69cb6f3-1485-4413-81f5-4de7a3d72609" containerName="glance-log" containerID="cri-o://1a3e747a2e6d1e2dea580be497d7cb58b4675ce09d5d43e27b1d7425d2f89855" gracePeriod=30 Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.288900 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f69cb6f3-1485-4413-81f5-4de7a3d72609" containerName="glance-httpd" containerID="cri-o://831350d2b60a3338b1015b52db22974f9781c28f010ca524b149391e7ddbd02a" gracePeriod=30 Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.290159 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6b598e3a-4b45-464f-bf19-f029193df5b2-scripts\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.290229 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b598e3a-4b45-464f-bf19-f029193df5b2-config-data\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.290254 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6b598e3a-4b45-464f-bf19-f029193df5b2-horizon-secret-key\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.290286 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b598e3a-4b45-464f-bf19-f029193df5b2-logs\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.290302 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l79v6\" (UniqueName: \"kubernetes.io/projected/6b598e3a-4b45-464f-bf19-f029193df5b2-kube-api-access-l79v6\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.297282 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-59cbc8f477-dh8x8"] Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.299037 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.310677 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-59cbc8f477-dh8x8"] Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.392314 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b598e3a-4b45-464f-bf19-f029193df5b2-logs\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.392385 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l79v6\" (UniqueName: \"kubernetes.io/projected/6b598e3a-4b45-464f-bf19-f029193df5b2-kube-api-access-l79v6\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.392479 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-horizon-secret-key\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.392515 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-scripts\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.392557 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6b598e3a-4b45-464f-bf19-f029193df5b2-scripts\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.392954 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b598e3a-4b45-464f-bf19-f029193df5b2-logs\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.392961 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-config-data\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.393065 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdvlz\" (UniqueName: \"kubernetes.io/projected/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-kube-api-access-cdvlz\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.393106 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b598e3a-4b45-464f-bf19-f029193df5b2-config-data\") pod 
\"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.393191 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6b598e3a-4b45-464f-bf19-f029193df5b2-horizon-secret-key\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.393233 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-logs\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.393283 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6b598e3a-4b45-464f-bf19-f029193df5b2-scripts\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.394318 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b598e3a-4b45-464f-bf19-f029193df5b2-config-data\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.403983 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6b598e3a-4b45-464f-bf19-f029193df5b2-horizon-secret-key\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.411865 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l79v6\" (UniqueName: \"kubernetes.io/projected/6b598e3a-4b45-464f-bf19-f029193df5b2-kube-api-access-l79v6\") pod \"horizon-c454dd9b5-768c2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.475650 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.494738 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-config-data\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.494799 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdvlz\" (UniqueName: \"kubernetes.io/projected/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-kube-api-access-cdvlz\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.494839 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-logs\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.494902 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-horizon-secret-key\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.494925 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-scripts\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.495613 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-scripts\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.496466 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-config-data\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.496958 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-logs\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.507677 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-horizon-secret-key\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.524660 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-cdvlz\" (UniqueName: \"kubernetes.io/projected/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-kube-api-access-cdvlz\") pod \"horizon-59cbc8f477-dh8x8\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.668832 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.786888 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6gps6"] Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.789213 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.800052 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6gps6"] Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.908633 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3b67709-a5e7-459a-87b0-d7d266ab60dd-catalog-content\") pod \"redhat-operators-6gps6\" (UID: \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\") " pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.908766 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-485d4\" (UniqueName: \"kubernetes.io/projected/d3b67709-a5e7-459a-87b0-d7d266ab60dd-kube-api-access-485d4\") pod \"redhat-operators-6gps6\" (UID: \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\") " pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.908824 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3b67709-a5e7-459a-87b0-d7d266ab60dd-utilities\") pod \"redhat-operators-6gps6\" (UID: \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\") " pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.937969 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-59cbc8f477-dh8x8"] Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.988402 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-8585df8647-wlz5f"] Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.990017 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.999056 4910 generic.go:334] "Generic (PLEG): container finished" podID="7459c264-36f9-4ebb-a162-81373cd02f98" containerID="c5487ef6b746df81b6254d06d90f7b6250dd82f9749d5814dfa3283645590f5e" exitCode=143 Jan 05 23:31:23 crc kubenswrapper[4910]: I0105 23:31:23.999165 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7459c264-36f9-4ebb-a162-81373cd02f98","Type":"ContainerDied","Data":"c5487ef6b746df81b6254d06d90f7b6250dd82f9749d5814dfa3283645590f5e"} Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.002757 4910 generic.go:334] "Generic (PLEG): container finished" podID="f69cb6f3-1485-4413-81f5-4de7a3d72609" containerID="1a3e747a2e6d1e2dea580be497d7cb58b4675ce09d5d43e27b1d7425d2f89855" exitCode=143 Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.002784 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f69cb6f3-1485-4413-81f5-4de7a3d72609","Type":"ContainerDied","Data":"1a3e747a2e6d1e2dea580be497d7cb58b4675ce09d5d43e27b1d7425d2f89855"} Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.011493 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-485d4\" (UniqueName: \"kubernetes.io/projected/d3b67709-a5e7-459a-87b0-d7d266ab60dd-kube-api-access-485d4\") pod \"redhat-operators-6gps6\" (UID: \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\") " pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.011602 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3b67709-a5e7-459a-87b0-d7d266ab60dd-utilities\") pod \"redhat-operators-6gps6\" (UID: \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\") " pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.011711 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3b67709-a5e7-459a-87b0-d7d266ab60dd-catalog-content\") pod \"redhat-operators-6gps6\" (UID: \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\") " pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.012310 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3b67709-a5e7-459a-87b0-d7d266ab60dd-utilities\") pod \"redhat-operators-6gps6\" (UID: \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\") " pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.012390 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3b67709-a5e7-459a-87b0-d7d266ab60dd-catalog-content\") pod \"redhat-operators-6gps6\" (UID: \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\") " pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.016769 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8585df8647-wlz5f"] Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.034277 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-485d4\" (UniqueName: 
\"kubernetes.io/projected/d3b67709-a5e7-459a-87b0-d7d266ab60dd-kube-api-access-485d4\") pod \"redhat-operators-6gps6\" (UID: \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\") " pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.113103 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.114111 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nw8t5\" (UniqueName: \"kubernetes.io/projected/226256c6-0132-469c-af61-bf062ea41762-kube-api-access-nw8t5\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.114237 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/226256c6-0132-469c-af61-bf062ea41762-config-data\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.114277 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/226256c6-0132-469c-af61-bf062ea41762-logs\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.114299 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/226256c6-0132-469c-af61-bf062ea41762-horizon-secret-key\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.114383 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/226256c6-0132-469c-af61-bf062ea41762-scripts\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.174152 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-c454dd9b5-768c2"] Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.216845 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nw8t5\" (UniqueName: \"kubernetes.io/projected/226256c6-0132-469c-af61-bf062ea41762-kube-api-access-nw8t5\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.217114 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/226256c6-0132-469c-af61-bf062ea41762-config-data\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.217163 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/226256c6-0132-469c-af61-bf062ea41762-logs\") 
pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.217194 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/226256c6-0132-469c-af61-bf062ea41762-horizon-secret-key\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.217221 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/226256c6-0132-469c-af61-bf062ea41762-scripts\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.218177 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/226256c6-0132-469c-af61-bf062ea41762-scripts\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.218371 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/226256c6-0132-469c-af61-bf062ea41762-config-data\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.218649 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/226256c6-0132-469c-af61-bf062ea41762-logs\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.225417 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/226256c6-0132-469c-af61-bf062ea41762-horizon-secret-key\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.238680 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nw8t5\" (UniqueName: \"kubernetes.io/projected/226256c6-0132-469c-af61-bf062ea41762-kube-api-access-nw8t5\") pod \"horizon-8585df8647-wlz5f\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.308826 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-59cbc8f477-dh8x8"] Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.319151 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.591365 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6gps6"] Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.722196 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:31:24 crc kubenswrapper[4910]: E0105 23:31:24.722691 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:31:24 crc kubenswrapper[4910]: I0105 23:31:24.783484 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8585df8647-wlz5f"] Jan 05 23:31:24 crc kubenswrapper[4910]: W0105 23:31:24.787829 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod226256c6_0132_469c_af61_bf062ea41762.slice/crio-f42e1d4ac6f2be39117cc2e8b80996b7ff392a4eebbaec643ab81c77265f41c9 WatchSource:0}: Error finding container f42e1d4ac6f2be39117cc2e8b80996b7ff392a4eebbaec643ab81c77265f41c9: Status 404 returned error can't find the container with id f42e1d4ac6f2be39117cc2e8b80996b7ff392a4eebbaec643ab81c77265f41c9 Jan 05 23:31:25 crc kubenswrapper[4910]: I0105 23:31:25.018628 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59cbc8f477-dh8x8" event={"ID":"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f","Type":"ContainerStarted","Data":"73c1e44c28fd96ff732cde70379d4cb8d8f7c3f3e24e5656de636b33d0471cd8"} Jan 05 23:31:25 crc kubenswrapper[4910]: I0105 23:31:25.020880 4910 generic.go:334] "Generic (PLEG): container finished" podID="d3b67709-a5e7-459a-87b0-d7d266ab60dd" containerID="f47b563c368921c9a61d0315f447127ea1ffc00e9ce791d3f9d9783908f819e7" exitCode=0 Jan 05 23:31:25 crc kubenswrapper[4910]: I0105 23:31:25.020956 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gps6" event={"ID":"d3b67709-a5e7-459a-87b0-d7d266ab60dd","Type":"ContainerDied","Data":"f47b563c368921c9a61d0315f447127ea1ffc00e9ce791d3f9d9783908f819e7"} Jan 05 23:31:25 crc kubenswrapper[4910]: I0105 23:31:25.020993 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gps6" event={"ID":"d3b67709-a5e7-459a-87b0-d7d266ab60dd","Type":"ContainerStarted","Data":"aae11ec25e7c776a56c1cdc1af899f96f0fadc6ac396811ba992a065d6ae499c"} Jan 05 23:31:25 crc kubenswrapper[4910]: I0105 23:31:25.025835 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c454dd9b5-768c2" event={"ID":"6b598e3a-4b45-464f-bf19-f029193df5b2","Type":"ContainerStarted","Data":"f23bea47b1c56f73aae769a8f3eee530fd0ab536ffc2c3421fc0ccde82230e34"} Jan 05 23:31:25 crc kubenswrapper[4910]: I0105 23:31:25.044782 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8585df8647-wlz5f" event={"ID":"226256c6-0132-469c-af61-bf062ea41762","Type":"ContainerStarted","Data":"f42e1d4ac6f2be39117cc2e8b80996b7ff392a4eebbaec643ab81c77265f41c9"} Jan 05 23:31:26 crc kubenswrapper[4910]: I0105 23:31:26.055132 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-6gps6" event={"ID":"d3b67709-a5e7-459a-87b0-d7d266ab60dd","Type":"ContainerStarted","Data":"0bab43a594261ee1d947c33a9fdf176c56e83e6212217d67c45dd6d3195d6e30"} Jan 05 23:31:26 crc kubenswrapper[4910]: I0105 23:31:26.897680 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.059091 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.078711 4910 generic.go:334] "Generic (PLEG): container finished" podID="d3b67709-a5e7-459a-87b0-d7d266ab60dd" containerID="0bab43a594261ee1d947c33a9fdf176c56e83e6212217d67c45dd6d3195d6e30" exitCode=0 Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.078786 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gps6" event={"ID":"d3b67709-a5e7-459a-87b0-d7d266ab60dd","Type":"ContainerDied","Data":"0bab43a594261ee1d947c33a9fdf176c56e83e6212217d67c45dd6d3195d6e30"} Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.094523 4910 generic.go:334] "Generic (PLEG): container finished" podID="7459c264-36f9-4ebb-a162-81373cd02f98" containerID="dc4dba090d7651413c055c546670049766a065a27e57e4aefcf6a51f23ebe82d" exitCode=0 Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.094584 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7459c264-36f9-4ebb-a162-81373cd02f98","Type":"ContainerDied","Data":"dc4dba090d7651413c055c546670049766a065a27e57e4aefcf6a51f23ebe82d"} Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.094619 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7459c264-36f9-4ebb-a162-81373cd02f98","Type":"ContainerDied","Data":"ec5dcfb21704f4b1be94c5c8db9a13f4ef9b098f3186417dea7bae3702f871ef"} Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.094637 4910 scope.go:117] "RemoveContainer" containerID="dc4dba090d7651413c055c546670049766a065a27e57e4aefcf6a51f23ebe82d" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.094773 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.097085 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-combined-ca-bundle\") pod \"7459c264-36f9-4ebb-a162-81373cd02f98\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.097165 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7459c264-36f9-4ebb-a162-81373cd02f98-httpd-run\") pod \"7459c264-36f9-4ebb-a162-81373cd02f98\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.097297 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7459c264-36f9-4ebb-a162-81373cd02f98-logs\") pod \"7459c264-36f9-4ebb-a162-81373cd02f98\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.097339 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-config-data\") pod \"7459c264-36f9-4ebb-a162-81373cd02f98\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.097375 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfznl\" (UniqueName: \"kubernetes.io/projected/7459c264-36f9-4ebb-a162-81373cd02f98-kube-api-access-pfznl\") pod \"7459c264-36f9-4ebb-a162-81373cd02f98\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.097432 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/7459c264-36f9-4ebb-a162-81373cd02f98-ceph\") pod \"7459c264-36f9-4ebb-a162-81373cd02f98\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.097506 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-scripts\") pod \"7459c264-36f9-4ebb-a162-81373cd02f98\" (UID: \"7459c264-36f9-4ebb-a162-81373cd02f98\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.101313 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7459c264-36f9-4ebb-a162-81373cd02f98-logs" (OuterVolumeSpecName: "logs") pod "7459c264-36f9-4ebb-a162-81373cd02f98" (UID: "7459c264-36f9-4ebb-a162-81373cd02f98"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.102916 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7459c264-36f9-4ebb-a162-81373cd02f98-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "7459c264-36f9-4ebb-a162-81373cd02f98" (UID: "7459c264-36f9-4ebb-a162-81373cd02f98"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.109388 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7459c264-36f9-4ebb-a162-81373cd02f98-kube-api-access-pfznl" (OuterVolumeSpecName: "kube-api-access-pfznl") pod "7459c264-36f9-4ebb-a162-81373cd02f98" (UID: "7459c264-36f9-4ebb-a162-81373cd02f98"). InnerVolumeSpecName "kube-api-access-pfznl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.112986 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-scripts" (OuterVolumeSpecName: "scripts") pod "7459c264-36f9-4ebb-a162-81373cd02f98" (UID: "7459c264-36f9-4ebb-a162-81373cd02f98"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.115561 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7459c264-36f9-4ebb-a162-81373cd02f98-ceph" (OuterVolumeSpecName: "ceph") pod "7459c264-36f9-4ebb-a162-81373cd02f98" (UID: "7459c264-36f9-4ebb-a162-81373cd02f98"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.118069 4910 generic.go:334] "Generic (PLEG): container finished" podID="f69cb6f3-1485-4413-81f5-4de7a3d72609" containerID="831350d2b60a3338b1015b52db22974f9781c28f010ca524b149391e7ddbd02a" exitCode=0 Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.118110 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f69cb6f3-1485-4413-81f5-4de7a3d72609","Type":"ContainerDied","Data":"831350d2b60a3338b1015b52db22974f9781c28f010ca524b149391e7ddbd02a"} Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.118153 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f69cb6f3-1485-4413-81f5-4de7a3d72609","Type":"ContainerDied","Data":"fcd222523118aee142990a36de6a0bc6d95a2e88e525012fe4354d258f05b815"} Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.118255 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.144459 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7459c264-36f9-4ebb-a162-81373cd02f98" (UID: "7459c264-36f9-4ebb-a162-81373cd02f98"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.159254 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-config-data" (OuterVolumeSpecName: "config-data") pod "7459c264-36f9-4ebb-a162-81373cd02f98" (UID: "7459c264-36f9-4ebb-a162-81373cd02f98"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.199018 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdmr4\" (UniqueName: \"kubernetes.io/projected/f69cb6f3-1485-4413-81f5-4de7a3d72609-kube-api-access-xdmr4\") pod \"f69cb6f3-1485-4413-81f5-4de7a3d72609\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.199065 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f69cb6f3-1485-4413-81f5-4de7a3d72609-logs\") pod \"f69cb6f3-1485-4413-81f5-4de7a3d72609\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.199088 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f69cb6f3-1485-4413-81f5-4de7a3d72609-httpd-run\") pod \"f69cb6f3-1485-4413-81f5-4de7a3d72609\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.199190 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f69cb6f3-1485-4413-81f5-4de7a3d72609-ceph\") pod \"f69cb6f3-1485-4413-81f5-4de7a3d72609\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.199913 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-scripts\") pod \"f69cb6f3-1485-4413-81f5-4de7a3d72609\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.199924 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f69cb6f3-1485-4413-81f5-4de7a3d72609-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f69cb6f3-1485-4413-81f5-4de7a3d72609" (UID: "f69cb6f3-1485-4413-81f5-4de7a3d72609"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.199940 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-config-data\") pod \"f69cb6f3-1485-4413-81f5-4de7a3d72609\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.200195 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-combined-ca-bundle\") pod \"f69cb6f3-1485-4413-81f5-4de7a3d72609\" (UID: \"f69cb6f3-1485-4413-81f5-4de7a3d72609\") " Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.200340 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f69cb6f3-1485-4413-81f5-4de7a3d72609-logs" (OuterVolumeSpecName: "logs") pod "f69cb6f3-1485-4413-81f5-4de7a3d72609" (UID: "f69cb6f3-1485-4413-81f5-4de7a3d72609"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.201079 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f69cb6f3-1485-4413-81f5-4de7a3d72609-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.201098 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f69cb6f3-1485-4413-81f5-4de7a3d72609-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.201108 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7459c264-36f9-4ebb-a162-81373cd02f98-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.201118 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.201127 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfznl\" (UniqueName: \"kubernetes.io/projected/7459c264-36f9-4ebb-a162-81373cd02f98-kube-api-access-pfznl\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.201175 4910 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/7459c264-36f9-4ebb-a162-81373cd02f98-ceph\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.201183 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.201191 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7459c264-36f9-4ebb-a162-81373cd02f98-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.201198 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7459c264-36f9-4ebb-a162-81373cd02f98-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.204248 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f69cb6f3-1485-4413-81f5-4de7a3d72609-ceph" (OuterVolumeSpecName: "ceph") pod "f69cb6f3-1485-4413-81f5-4de7a3d72609" (UID: "f69cb6f3-1485-4413-81f5-4de7a3d72609"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.204253 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-scripts" (OuterVolumeSpecName: "scripts") pod "f69cb6f3-1485-4413-81f5-4de7a3d72609" (UID: "f69cb6f3-1485-4413-81f5-4de7a3d72609"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.205254 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f69cb6f3-1485-4413-81f5-4de7a3d72609-kube-api-access-xdmr4" (OuterVolumeSpecName: "kube-api-access-xdmr4") pod "f69cb6f3-1485-4413-81f5-4de7a3d72609" (UID: "f69cb6f3-1485-4413-81f5-4de7a3d72609"). InnerVolumeSpecName "kube-api-access-xdmr4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.217821 4910 scope.go:117] "RemoveContainer" containerID="c5487ef6b746df81b6254d06d90f7b6250dd82f9749d5814dfa3283645590f5e" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.225833 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f69cb6f3-1485-4413-81f5-4de7a3d72609" (UID: "f69cb6f3-1485-4413-81f5-4de7a3d72609"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.256248 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-config-data" (OuterVolumeSpecName: "config-data") pod "f69cb6f3-1485-4413-81f5-4de7a3d72609" (UID: "f69cb6f3-1485-4413-81f5-4de7a3d72609"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.267516 4910 scope.go:117] "RemoveContainer" containerID="dc4dba090d7651413c055c546670049766a065a27e57e4aefcf6a51f23ebe82d" Jan 05 23:31:27 crc kubenswrapper[4910]: E0105 23:31:27.268173 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc4dba090d7651413c055c546670049766a065a27e57e4aefcf6a51f23ebe82d\": container with ID starting with dc4dba090d7651413c055c546670049766a065a27e57e4aefcf6a51f23ebe82d not found: ID does not exist" containerID="dc4dba090d7651413c055c546670049766a065a27e57e4aefcf6a51f23ebe82d" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.268201 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc4dba090d7651413c055c546670049766a065a27e57e4aefcf6a51f23ebe82d"} err="failed to get container status \"dc4dba090d7651413c055c546670049766a065a27e57e4aefcf6a51f23ebe82d\": rpc error: code = NotFound desc = could not find container \"dc4dba090d7651413c055c546670049766a065a27e57e4aefcf6a51f23ebe82d\": container with ID starting with dc4dba090d7651413c055c546670049766a065a27e57e4aefcf6a51f23ebe82d not found: ID does not exist" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.268223 4910 scope.go:117] "RemoveContainer" containerID="c5487ef6b746df81b6254d06d90f7b6250dd82f9749d5814dfa3283645590f5e" Jan 05 23:31:27 crc kubenswrapper[4910]: E0105 23:31:27.268474 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5487ef6b746df81b6254d06d90f7b6250dd82f9749d5814dfa3283645590f5e\": container with ID starting with c5487ef6b746df81b6254d06d90f7b6250dd82f9749d5814dfa3283645590f5e not found: ID does not exist" containerID="c5487ef6b746df81b6254d06d90f7b6250dd82f9749d5814dfa3283645590f5e" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.268494 4910 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5487ef6b746df81b6254d06d90f7b6250dd82f9749d5814dfa3283645590f5e"} err="failed to get container status \"c5487ef6b746df81b6254d06d90f7b6250dd82f9749d5814dfa3283645590f5e\": rpc error: code = NotFound desc = could not find container \"c5487ef6b746df81b6254d06d90f7b6250dd82f9749d5814dfa3283645590f5e\": container with ID starting with c5487ef6b746df81b6254d06d90f7b6250dd82f9749d5814dfa3283645590f5e not found: ID does not exist" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.268506 4910 scope.go:117] "RemoveContainer" containerID="831350d2b60a3338b1015b52db22974f9781c28f010ca524b149391e7ddbd02a" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.303245 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.303265 4910 scope.go:117] "RemoveContainer" containerID="1a3e747a2e6d1e2dea580be497d7cb58b4675ce09d5d43e27b1d7425d2f89855" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.303277 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xdmr4\" (UniqueName: \"kubernetes.io/projected/f69cb6f3-1485-4413-81f5-4de7a3d72609-kube-api-access-xdmr4\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.303393 4910 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f69cb6f3-1485-4413-81f5-4de7a3d72609-ceph\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.303404 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.303415 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f69cb6f3-1485-4413-81f5-4de7a3d72609-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.333544 4910 scope.go:117] "RemoveContainer" containerID="831350d2b60a3338b1015b52db22974f9781c28f010ca524b149391e7ddbd02a" Jan 05 23:31:27 crc kubenswrapper[4910]: E0105 23:31:27.334404 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"831350d2b60a3338b1015b52db22974f9781c28f010ca524b149391e7ddbd02a\": container with ID starting with 831350d2b60a3338b1015b52db22974f9781c28f010ca524b149391e7ddbd02a not found: ID does not exist" containerID="831350d2b60a3338b1015b52db22974f9781c28f010ca524b149391e7ddbd02a" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.334466 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"831350d2b60a3338b1015b52db22974f9781c28f010ca524b149391e7ddbd02a"} err="failed to get container status \"831350d2b60a3338b1015b52db22974f9781c28f010ca524b149391e7ddbd02a\": rpc error: code = NotFound desc = could not find container \"831350d2b60a3338b1015b52db22974f9781c28f010ca524b149391e7ddbd02a\": container with ID starting with 831350d2b60a3338b1015b52db22974f9781c28f010ca524b149391e7ddbd02a not found: ID does not exist" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.334490 4910 scope.go:117] "RemoveContainer" 
containerID="1a3e747a2e6d1e2dea580be497d7cb58b4675ce09d5d43e27b1d7425d2f89855" Jan 05 23:31:27 crc kubenswrapper[4910]: E0105 23:31:27.335022 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a3e747a2e6d1e2dea580be497d7cb58b4675ce09d5d43e27b1d7425d2f89855\": container with ID starting with 1a3e747a2e6d1e2dea580be497d7cb58b4675ce09d5d43e27b1d7425d2f89855 not found: ID does not exist" containerID="1a3e747a2e6d1e2dea580be497d7cb58b4675ce09d5d43e27b1d7425d2f89855" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.335077 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a3e747a2e6d1e2dea580be497d7cb58b4675ce09d5d43e27b1d7425d2f89855"} err="failed to get container status \"1a3e747a2e6d1e2dea580be497d7cb58b4675ce09d5d43e27b1d7425d2f89855\": rpc error: code = NotFound desc = could not find container \"1a3e747a2e6d1e2dea580be497d7cb58b4675ce09d5d43e27b1d7425d2f89855\": container with ID starting with 1a3e747a2e6d1e2dea580be497d7cb58b4675ce09d5d43e27b1d7425d2f89855 not found: ID does not exist" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.432323 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.442009 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.469404 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 23:31:27 crc kubenswrapper[4910]: E0105 23:31:27.469899 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7459c264-36f9-4ebb-a162-81373cd02f98" containerName="glance-log" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.469919 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7459c264-36f9-4ebb-a162-81373cd02f98" containerName="glance-log" Jan 05 23:31:27 crc kubenswrapper[4910]: E0105 23:31:27.469943 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f69cb6f3-1485-4413-81f5-4de7a3d72609" containerName="glance-httpd" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.469949 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f69cb6f3-1485-4413-81f5-4de7a3d72609" containerName="glance-httpd" Jan 05 23:31:27 crc kubenswrapper[4910]: E0105 23:31:27.469979 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f69cb6f3-1485-4413-81f5-4de7a3d72609" containerName="glance-log" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.469985 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f69cb6f3-1485-4413-81f5-4de7a3d72609" containerName="glance-log" Jan 05 23:31:27 crc kubenswrapper[4910]: E0105 23:31:27.470006 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7459c264-36f9-4ebb-a162-81373cd02f98" containerName="glance-httpd" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.470011 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7459c264-36f9-4ebb-a162-81373cd02f98" containerName="glance-httpd" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.470226 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f69cb6f3-1485-4413-81f5-4de7a3d72609" containerName="glance-httpd" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.470249 4910 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="7459c264-36f9-4ebb-a162-81373cd02f98" containerName="glance-httpd" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.470265 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="7459c264-36f9-4ebb-a162-81373cd02f98" containerName="glance-log" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.470280 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f69cb6f3-1485-4413-81f5-4de7a3d72609" containerName="glance-log" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.471504 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.474811 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-xt8b5" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.474981 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.475154 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.496456 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.508359 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.509488 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47fd1e78-1916-411b-9841-a503c9fdc455-config-data\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.509584 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/47fd1e78-1916-411b-9841-a503c9fdc455-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.509610 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxdtf\" (UniqueName: \"kubernetes.io/projected/47fd1e78-1916-411b-9841-a503c9fdc455-kube-api-access-qxdtf\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.509639 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47fd1e78-1916-411b-9841-a503c9fdc455-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.509676 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47fd1e78-1916-411b-9841-a503c9fdc455-logs\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc 
kubenswrapper[4910]: I0105 23:31:27.509714 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/47fd1e78-1916-411b-9841-a503c9fdc455-ceph\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.509733 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47fd1e78-1916-411b-9841-a503c9fdc455-scripts\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.531339 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.540551 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.542146 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.545054 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.567836 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.611621 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47396791-d60d-4902-b7b7-7c798ac6136f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.611710 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47fd1e78-1916-411b-9841-a503c9fdc455-config-data\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.611829 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/47396791-d60d-4902-b7b7-7c798ac6136f-ceph\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.611856 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/47396791-d60d-4902-b7b7-7c798ac6136f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.611887 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/47fd1e78-1916-411b-9841-a503c9fdc455-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " 
pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.611913 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxdtf\" (UniqueName: \"kubernetes.io/projected/47fd1e78-1916-411b-9841-a503c9fdc455-kube-api-access-qxdtf\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.611957 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47fd1e78-1916-411b-9841-a503c9fdc455-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.612004 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6fnz\" (UniqueName: \"kubernetes.io/projected/47396791-d60d-4902-b7b7-7c798ac6136f-kube-api-access-w6fnz\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.612038 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47396791-d60d-4902-b7b7-7c798ac6136f-logs\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.612066 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47fd1e78-1916-411b-9841-a503c9fdc455-logs\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.612099 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47396791-d60d-4902-b7b7-7c798ac6136f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.612137 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/47fd1e78-1916-411b-9841-a503c9fdc455-ceph\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.612192 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47396791-d60d-4902-b7b7-7c798ac6136f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.612216 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47fd1e78-1916-411b-9841-a503c9fdc455-scripts\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " 
pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.616030 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47fd1e78-1916-411b-9841-a503c9fdc455-scripts\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.619743 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/47fd1e78-1916-411b-9841-a503c9fdc455-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.620124 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47fd1e78-1916-411b-9841-a503c9fdc455-logs\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.620401 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47fd1e78-1916-411b-9841-a503c9fdc455-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.620969 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47fd1e78-1916-411b-9841-a503c9fdc455-config-data\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.622457 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/47fd1e78-1916-411b-9841-a503c9fdc455-ceph\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.661891 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxdtf\" (UniqueName: \"kubernetes.io/projected/47fd1e78-1916-411b-9841-a503c9fdc455-kube-api-access-qxdtf\") pod \"glance-default-external-api-0\" (UID: \"47fd1e78-1916-411b-9841-a503c9fdc455\") " pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.720053 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6fnz\" (UniqueName: \"kubernetes.io/projected/47396791-d60d-4902-b7b7-7c798ac6136f-kube-api-access-w6fnz\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.747820 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47396791-d60d-4902-b7b7-7c798ac6136f-logs\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.748025 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47396791-d60d-4902-b7b7-7c798ac6136f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.748102 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47396791-d60d-4902-b7b7-7c798ac6136f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.748242 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47396791-d60d-4902-b7b7-7c798ac6136f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.748470 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/47396791-d60d-4902-b7b7-7c798ac6136f-ceph\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.748505 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/47396791-d60d-4902-b7b7-7c798ac6136f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.750258 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47396791-d60d-4902-b7b7-7c798ac6136f-logs\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.753108 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/47396791-d60d-4902-b7b7-7c798ac6136f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.769955 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/47396791-d60d-4902-b7b7-7c798ac6136f-ceph\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.771759 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47396791-d60d-4902-b7b7-7c798ac6136f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.772362 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6fnz\" (UniqueName: \"kubernetes.io/projected/47396791-d60d-4902-b7b7-7c798ac6136f-kube-api-access-w6fnz\") pod 
\"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.777598 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47396791-d60d-4902-b7b7-7c798ac6136f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.778258 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47396791-d60d-4902-b7b7-7c798ac6136f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"47396791-d60d-4902-b7b7-7c798ac6136f\") " pod="openstack/glance-default-internal-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.813749 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 05 23:31:27 crc kubenswrapper[4910]: I0105 23:31:27.872843 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 05 23:31:28 crc kubenswrapper[4910]: I0105 23:31:28.738354 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7459c264-36f9-4ebb-a162-81373cd02f98" path="/var/lib/kubelet/pods/7459c264-36f9-4ebb-a162-81373cd02f98/volumes" Jan 05 23:31:28 crc kubenswrapper[4910]: I0105 23:31:28.742680 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f69cb6f3-1485-4413-81f5-4de7a3d72609" path="/var/lib/kubelet/pods/f69cb6f3-1485-4413-81f5-4de7a3d72609/volumes" Jan 05 23:31:29 crc kubenswrapper[4910]: I0105 23:31:29.548885 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bfp7s"] Jan 05 23:31:29 crc kubenswrapper[4910]: I0105 23:31:29.551952 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:29 crc kubenswrapper[4910]: I0105 23:31:29.565112 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfp7s"] Jan 05 23:31:29 crc kubenswrapper[4910]: I0105 23:31:29.605367 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58845880-bb82-4fb8-bc7d-19df1f75f136-utilities\") pod \"redhat-marketplace-bfp7s\" (UID: \"58845880-bb82-4fb8-bc7d-19df1f75f136\") " pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:29 crc kubenswrapper[4910]: I0105 23:31:29.605496 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtpd4\" (UniqueName: \"kubernetes.io/projected/58845880-bb82-4fb8-bc7d-19df1f75f136-kube-api-access-xtpd4\") pod \"redhat-marketplace-bfp7s\" (UID: \"58845880-bb82-4fb8-bc7d-19df1f75f136\") " pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:29 crc kubenswrapper[4910]: I0105 23:31:29.605529 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58845880-bb82-4fb8-bc7d-19df1f75f136-catalog-content\") pod \"redhat-marketplace-bfp7s\" (UID: \"58845880-bb82-4fb8-bc7d-19df1f75f136\") " pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:29 crc kubenswrapper[4910]: I0105 23:31:29.707826 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtpd4\" (UniqueName: \"kubernetes.io/projected/58845880-bb82-4fb8-bc7d-19df1f75f136-kube-api-access-xtpd4\") pod \"redhat-marketplace-bfp7s\" (UID: \"58845880-bb82-4fb8-bc7d-19df1f75f136\") " pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:29 crc kubenswrapper[4910]: I0105 23:31:29.707885 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58845880-bb82-4fb8-bc7d-19df1f75f136-catalog-content\") pod \"redhat-marketplace-bfp7s\" (UID: \"58845880-bb82-4fb8-bc7d-19df1f75f136\") " pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:29 crc kubenswrapper[4910]: I0105 23:31:29.707992 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58845880-bb82-4fb8-bc7d-19df1f75f136-utilities\") pod \"redhat-marketplace-bfp7s\" (UID: \"58845880-bb82-4fb8-bc7d-19df1f75f136\") " pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:29 crc kubenswrapper[4910]: I0105 23:31:29.708640 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58845880-bb82-4fb8-bc7d-19df1f75f136-utilities\") pod \"redhat-marketplace-bfp7s\" (UID: \"58845880-bb82-4fb8-bc7d-19df1f75f136\") " pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:29 crc kubenswrapper[4910]: I0105 23:31:29.708710 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58845880-bb82-4fb8-bc7d-19df1f75f136-catalog-content\") pod \"redhat-marketplace-bfp7s\" (UID: \"58845880-bb82-4fb8-bc7d-19df1f75f136\") " pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:29 crc kubenswrapper[4910]: I0105 23:31:29.732255 4910 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-xtpd4\" (UniqueName: \"kubernetes.io/projected/58845880-bb82-4fb8-bc7d-19df1f75f136-kube-api-access-xtpd4\") pod \"redhat-marketplace-bfp7s\" (UID: \"58845880-bb82-4fb8-bc7d-19df1f75f136\") " pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:29 crc kubenswrapper[4910]: I0105 23:31:29.896789 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:33 crc kubenswrapper[4910]: I0105 23:31:33.803699 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfp7s"] Jan 05 23:31:33 crc kubenswrapper[4910]: I0105 23:31:33.886823 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 05 23:31:33 crc kubenswrapper[4910]: W0105 23:31:33.891802 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod47396791_d60d_4902_b7b7_7c798ac6136f.slice/crio-e37b0a88e4b820b9b5842337193c9c13e12315f6ec634dc8a383b19333de6fb6 WatchSource:0}: Error finding container e37b0a88e4b820b9b5842337193c9c13e12315f6ec634dc8a383b19333de6fb6: Status 404 returned error can't find the container with id e37b0a88e4b820b9b5842337193c9c13e12315f6ec634dc8a383b19333de6fb6 Jan 05 23:31:34 crc kubenswrapper[4910]: I0105 23:31:34.116196 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 05 23:31:34 crc kubenswrapper[4910]: W0105 23:31:34.118908 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod47fd1e78_1916_411b_9841_a503c9fdc455.slice/crio-25949d2a10780a2de1fe45077e56a67b88c7cf1416b050c2e5420df4fb93b879 WatchSource:0}: Error finding container 25949d2a10780a2de1fe45077e56a67b88c7cf1416b050c2e5420df4fb93b879: Status 404 returned error can't find the container with id 25949d2a10780a2de1fe45077e56a67b88c7cf1416b050c2e5420df4fb93b879 Jan 05 23:31:34 crc kubenswrapper[4910]: I0105 23:31:34.203090 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"47396791-d60d-4902-b7b7-7c798ac6136f","Type":"ContainerStarted","Data":"e37b0a88e4b820b9b5842337193c9c13e12315f6ec634dc8a383b19333de6fb6"} Jan 05 23:31:34 crc kubenswrapper[4910]: I0105 23:31:34.205440 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"47fd1e78-1916-411b-9841-a503c9fdc455","Type":"ContainerStarted","Data":"25949d2a10780a2de1fe45077e56a67b88c7cf1416b050c2e5420df4fb93b879"} Jan 05 23:31:34 crc kubenswrapper[4910]: I0105 23:31:34.207499 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfp7s" event={"ID":"58845880-bb82-4fb8-bc7d-19df1f75f136","Type":"ContainerStarted","Data":"a1e672bb84825fc1e3768b50e90d12321a4f3ba518e95f357e65e0c95ff0131f"} Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.230698 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"47396791-d60d-4902-b7b7-7c798ac6136f","Type":"ContainerStarted","Data":"492ffea1b6a50c436cebbb2c2661853078245c5e96bbb6d26d903928941a28f1"} Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.231406 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"47396791-d60d-4902-b7b7-7c798ac6136f","Type":"ContainerStarted","Data":"ebe1baaef57186d356b3488b5abc44e56906165a9d205a28fd38d0f4d813b728"} Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.243066 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-59cbc8f477-dh8x8" podUID="f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" containerName="horizon-log" containerID="cri-o://a56ac7db0904879bece7f3b7232931532b1c5b680aefe44b5532385b4cdde7fe" gracePeriod=30 Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.243794 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59cbc8f477-dh8x8" event={"ID":"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f","Type":"ContainerStarted","Data":"0ff158ca14212e856e6f7fb8b45bd0287ea58054ef34a23d5726d253b6264066"} Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.243842 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59cbc8f477-dh8x8" event={"ID":"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f","Type":"ContainerStarted","Data":"a56ac7db0904879bece7f3b7232931532b1c5b680aefe44b5532385b4cdde7fe"} Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.243915 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-59cbc8f477-dh8x8" podUID="f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" containerName="horizon" containerID="cri-o://0ff158ca14212e856e6f7fb8b45bd0287ea58054ef34a23d5726d253b6264066" gracePeriod=30 Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.268530 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gps6" event={"ID":"d3b67709-a5e7-459a-87b0-d7d266ab60dd","Type":"ContainerStarted","Data":"aa0f716ce71de8389b6585a61eae27772e2ac770f5fd0cda0ff4b1f10592fe93"} Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.271037 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=8.271017299 podStartE2EDuration="8.271017299s" podCreationTimestamp="2026-01-05 23:31:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:31:35.263735415 +0000 UTC m=+6026.841233085" watchObservedRunningTime="2026-01-05 23:31:35.271017299 +0000 UTC m=+6026.848514959" Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.283994 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"47fd1e78-1916-411b-9841-a503c9fdc455","Type":"ContainerStarted","Data":"d81a5c37ece5c1a968c897cb5aec00ae4a08604a2c3f22ce5010cd0f4e912b29"} Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.293826 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-59cbc8f477-dh8x8" podStartSLOduration=3.253228756 podStartE2EDuration="12.293804553s" podCreationTimestamp="2026-01-05 23:31:23 +0000 UTC" firstStartedPulling="2026-01-05 23:31:24.354276362 +0000 UTC m=+6015.931774032" lastFinishedPulling="2026-01-05 23:31:33.394852159 +0000 UTC m=+6024.972349829" observedRunningTime="2026-01-05 23:31:35.281991351 +0000 UTC m=+6026.859489021" watchObservedRunningTime="2026-01-05 23:31:35.293804553 +0000 UTC m=+6026.871302223" Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.299056 4910 generic.go:334] "Generic (PLEG): container finished" podID="58845880-bb82-4fb8-bc7d-19df1f75f136" 
containerID="c9abe605faaa22d205e010de1ee316b76a2a43904ac346406f11abf759924cbb" exitCode=0 Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.299121 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfp7s" event={"ID":"58845880-bb82-4fb8-bc7d-19df1f75f136","Type":"ContainerDied","Data":"c9abe605faaa22d205e010de1ee316b76a2a43904ac346406f11abf759924cbb"} Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.304588 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6gps6" podStartSLOduration=3.933632757 podStartE2EDuration="12.30457416s" podCreationTimestamp="2026-01-05 23:31:23 +0000 UTC" firstStartedPulling="2026-01-05 23:31:25.02240744 +0000 UTC m=+6016.599905100" lastFinishedPulling="2026-01-05 23:31:33.393348843 +0000 UTC m=+6024.970846503" observedRunningTime="2026-01-05 23:31:35.300451311 +0000 UTC m=+6026.877948981" watchObservedRunningTime="2026-01-05 23:31:35.30457416 +0000 UTC m=+6026.882071830" Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.316291 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c454dd9b5-768c2" event={"ID":"6b598e3a-4b45-464f-bf19-f029193df5b2","Type":"ContainerStarted","Data":"476dc7ee26dd9e0d5b69320a76909889d1e41864a12d7c07102ecd2460981394"} Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.316650 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c454dd9b5-768c2" event={"ID":"6b598e3a-4b45-464f-bf19-f029193df5b2","Type":"ContainerStarted","Data":"df4eb3a4d27f36e935e46bb2126946fc01c14e6bb6e50cb38ee64ca4967d7ada"} Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.329555 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8585df8647-wlz5f" event={"ID":"226256c6-0132-469c-af61-bf062ea41762","Type":"ContainerStarted","Data":"e2e22b952c75535273d518b9ff0c101e27b86ecf318387e70466ab879aa704b5"} Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.329606 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8585df8647-wlz5f" event={"ID":"226256c6-0132-469c-af61-bf062ea41762","Type":"ContainerStarted","Data":"d2d59482631a40a4a2ad4071dab47b5c28192b80780bc401ff67e1f7af399f06"} Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.353286 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-c454dd9b5-768c2" podStartSLOduration=3.187021896 podStartE2EDuration="12.353263641s" podCreationTimestamp="2026-01-05 23:31:23 +0000 UTC" firstStartedPulling="2026-01-05 23:31:24.19030309 +0000 UTC m=+6015.767800760" lastFinishedPulling="2026-01-05 23:31:33.356544825 +0000 UTC m=+6024.934042505" observedRunningTime="2026-01-05 23:31:35.345898565 +0000 UTC m=+6026.923396235" watchObservedRunningTime="2026-01-05 23:31:35.353263641 +0000 UTC m=+6026.930761311" Jan 05 23:31:35 crc kubenswrapper[4910]: I0105 23:31:35.378139 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-8585df8647-wlz5f" podStartSLOduration=3.7756945589999997 podStartE2EDuration="12.378103974s" podCreationTimestamp="2026-01-05 23:31:23 +0000 UTC" firstStartedPulling="2026-01-05 23:31:24.79185474 +0000 UTC m=+6016.369352410" lastFinishedPulling="2026-01-05 23:31:33.394264155 +0000 UTC m=+6024.971761825" observedRunningTime="2026-01-05 23:31:35.368825393 +0000 UTC m=+6026.946323063" watchObservedRunningTime="2026-01-05 23:31:35.378103974 +0000 UTC m=+6026.955601644" Jan 05 23:31:36 crc 
kubenswrapper[4910]: I0105 23:31:36.349306 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"47fd1e78-1916-411b-9841-a503c9fdc455","Type":"ContainerStarted","Data":"b4dbe87e631663ebcf5bb7de6295a8bf4c3b8cddeccbf21b0b6056df3bc3aea9"} Jan 05 23:31:36 crc kubenswrapper[4910]: I0105 23:31:36.352670 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfp7s" event={"ID":"58845880-bb82-4fb8-bc7d-19df1f75f136","Type":"ContainerStarted","Data":"c12f7776f974bfed580366e247ef5ded8814a81dbb2e101b4d898329a043d492"} Jan 05 23:31:36 crc kubenswrapper[4910]: I0105 23:31:36.384278 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=9.384257278 podStartE2EDuration="9.384257278s" podCreationTimestamp="2026-01-05 23:31:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:31:36.379767301 +0000 UTC m=+6027.957264991" watchObservedRunningTime="2026-01-05 23:31:36.384257278 +0000 UTC m=+6027.961754938" Jan 05 23:31:37 crc kubenswrapper[4910]: I0105 23:31:37.386083 4910 generic.go:334] "Generic (PLEG): container finished" podID="58845880-bb82-4fb8-bc7d-19df1f75f136" containerID="c12f7776f974bfed580366e247ef5ded8814a81dbb2e101b4d898329a043d492" exitCode=0 Jan 05 23:31:37 crc kubenswrapper[4910]: I0105 23:31:37.386335 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfp7s" event={"ID":"58845880-bb82-4fb8-bc7d-19df1f75f136","Type":"ContainerDied","Data":"c12f7776f974bfed580366e247ef5ded8814a81dbb2e101b4d898329a043d492"} Jan 05 23:31:37 crc kubenswrapper[4910]: I0105 23:31:37.388800 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 23:31:37 crc kubenswrapper[4910]: I0105 23:31:37.815047 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 05 23:31:37 crc kubenswrapper[4910]: I0105 23:31:37.815364 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 05 23:31:37 crc kubenswrapper[4910]: I0105 23:31:37.856933 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 05 23:31:37 crc kubenswrapper[4910]: I0105 23:31:37.863249 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 05 23:31:37 crc kubenswrapper[4910]: I0105 23:31:37.873376 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 05 23:31:37 crc kubenswrapper[4910]: I0105 23:31:37.873413 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 05 23:31:37 crc kubenswrapper[4910]: I0105 23:31:37.932334 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 05 23:31:37 crc kubenswrapper[4910]: I0105 23:31:37.935758 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 05 23:31:38 crc kubenswrapper[4910]: I0105 23:31:38.397165 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/glance-default-internal-api-0" Jan 05 23:31:38 crc kubenswrapper[4910]: I0105 23:31:38.397426 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 05 23:31:38 crc kubenswrapper[4910]: I0105 23:31:38.397471 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 05 23:31:38 crc kubenswrapper[4910]: I0105 23:31:38.397484 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 05 23:31:38 crc kubenswrapper[4910]: I0105 23:31:38.728949 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:31:38 crc kubenswrapper[4910]: E0105 23:31:38.729357 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:31:39 crc kubenswrapper[4910]: I0105 23:31:39.408303 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfp7s" event={"ID":"58845880-bb82-4fb8-bc7d-19df1f75f136","Type":"ContainerStarted","Data":"8d482e6ebf9e413ab35912290695e1c105fdedc9cbf626635d3d9d5b9d656eca"} Jan 05 23:31:39 crc kubenswrapper[4910]: I0105 23:31:39.429022 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bfp7s" podStartSLOduration=7.006740022 podStartE2EDuration="10.428999387s" podCreationTimestamp="2026-01-05 23:31:29 +0000 UTC" firstStartedPulling="2026-01-05 23:31:35.305295357 +0000 UTC m=+6026.882793027" lastFinishedPulling="2026-01-05 23:31:38.727554712 +0000 UTC m=+6030.305052392" observedRunningTime="2026-01-05 23:31:39.424565521 +0000 UTC m=+6031.002063191" watchObservedRunningTime="2026-01-05 23:31:39.428999387 +0000 UTC m=+6031.006497057" Jan 05 23:31:39 crc kubenswrapper[4910]: I0105 23:31:39.897545 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:39 crc kubenswrapper[4910]: I0105 23:31:39.897594 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:40 crc kubenswrapper[4910]: I0105 23:31:40.047591 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-km68x"] Jan 05 23:31:40 crc kubenswrapper[4910]: I0105 23:31:40.063447 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-km68x"] Jan 05 23:31:40 crc kubenswrapper[4910]: I0105 23:31:40.072711 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8db8-account-create-update-8zh22"] Jan 05 23:31:40 crc kubenswrapper[4910]: I0105 23:31:40.084104 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-8db8-account-create-update-8zh22"] Jan 05 23:31:40 crc kubenswrapper[4910]: I0105 23:31:40.733512 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="079de26f-9048-4f5e-b884-732587f9606b" path="/var/lib/kubelet/pods/079de26f-9048-4f5e-b884-732587f9606b/volumes" Jan 05 23:31:40 crc kubenswrapper[4910]: I0105 
23:31:40.734689 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fc15311-6c7f-44f9-81b3-497c39223359" path="/var/lib/kubelet/pods/1fc15311-6c7f-44f9-81b3-497c39223359/volumes" Jan 05 23:31:40 crc kubenswrapper[4910]: I0105 23:31:40.967311 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-bfp7s" podUID="58845880-bb82-4fb8-bc7d-19df1f75f136" containerName="registry-server" probeResult="failure" output=< Jan 05 23:31:40 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Jan 05 23:31:40 crc kubenswrapper[4910]: > Jan 05 23:31:41 crc kubenswrapper[4910]: I0105 23:31:41.586721 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 05 23:31:41 crc kubenswrapper[4910]: I0105 23:31:41.688030 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 05 23:31:41 crc kubenswrapper[4910]: I0105 23:31:41.700463 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 05 23:31:43 crc kubenswrapper[4910]: I0105 23:31:43.476491 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:43 crc kubenswrapper[4910]: I0105 23:31:43.476799 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:31:43 crc kubenswrapper[4910]: I0105 23:31:43.649519 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 05 23:31:43 crc kubenswrapper[4910]: I0105 23:31:43.669683 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:31:44 crc kubenswrapper[4910]: I0105 23:31:44.114842 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:44 crc kubenswrapper[4910]: I0105 23:31:44.115105 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:44 crc kubenswrapper[4910]: I0105 23:31:44.320763 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:44 crc kubenswrapper[4910]: I0105 23:31:44.320803 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:44 crc kubenswrapper[4910]: I0105 23:31:44.322616 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8585df8647-wlz5f" podUID="226256c6-0132-469c-af61-bf062ea41762" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.117:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.117:8080: connect: connection refused" Jan 05 23:31:45 crc kubenswrapper[4910]: I0105 23:31:45.168718 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6gps6" podUID="d3b67709-a5e7-459a-87b0-d7d266ab60dd" containerName="registry-server" probeResult="failure" output=< Jan 05 23:31:45 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Jan 05 23:31:45 crc kubenswrapper[4910]: > Jan 05 23:31:46 crc kubenswrapper[4910]: I0105 23:31:46.661036 4910 scope.go:117] "RemoveContainer" 
containerID="988d6ed10f626896fbfd1e282cfe271e921f7766df18c0e8a74e3fb9d05c2af2" Jan 05 23:31:46 crc kubenswrapper[4910]: I0105 23:31:46.689916 4910 scope.go:117] "RemoveContainer" containerID="be74830cf4fb9fca766fe680f75626aafaa6e4d44c127d4e316b2cdebcf45b78" Jan 05 23:31:46 crc kubenswrapper[4910]: I0105 23:31:46.749155 4910 scope.go:117] "RemoveContainer" containerID="ad6b234209b0f06705f68322ef63cc28301787c506756148cfb089ccd9a68992" Jan 05 23:31:46 crc kubenswrapper[4910]: I0105 23:31:46.792031 4910 scope.go:117] "RemoveContainer" containerID="2be6d3b22040bcb1f7353a9c562c48f0162d1e94509e046aaf2ed2f5131b462b" Jan 05 23:31:46 crc kubenswrapper[4910]: I0105 23:31:46.859966 4910 scope.go:117] "RemoveContainer" containerID="e94082365609cceb04e6d9f40175c7d32943fefe7a1c465491770fc03ad68fb0" Jan 05 23:31:49 crc kubenswrapper[4910]: I0105 23:31:49.059948 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-qm7fx"] Jan 05 23:31:49 crc kubenswrapper[4910]: I0105 23:31:49.073811 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-qm7fx"] Jan 05 23:31:49 crc kubenswrapper[4910]: I0105 23:31:49.965894 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:50 crc kubenswrapper[4910]: I0105 23:31:50.035195 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:50 crc kubenswrapper[4910]: I0105 23:31:50.210178 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfp7s"] Jan 05 23:31:50 crc kubenswrapper[4910]: I0105 23:31:50.735117 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b3d03b7-e9b1-4577-a181-f103c4121ecf" path="/var/lib/kubelet/pods/5b3d03b7-e9b1-4577-a181-f103c4121ecf/volumes" Jan 05 23:31:51 crc kubenswrapper[4910]: I0105 23:31:51.544765 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bfp7s" podUID="58845880-bb82-4fb8-bc7d-19df1f75f136" containerName="registry-server" containerID="cri-o://8d482e6ebf9e413ab35912290695e1c105fdedc9cbf626635d3d9d5b9d656eca" gracePeriod=2 Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.129765 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.196820 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58845880-bb82-4fb8-bc7d-19df1f75f136-utilities\") pod \"58845880-bb82-4fb8-bc7d-19df1f75f136\" (UID: \"58845880-bb82-4fb8-bc7d-19df1f75f136\") " Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.197198 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58845880-bb82-4fb8-bc7d-19df1f75f136-catalog-content\") pod \"58845880-bb82-4fb8-bc7d-19df1f75f136\" (UID: \"58845880-bb82-4fb8-bc7d-19df1f75f136\") " Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.197296 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtpd4\" (UniqueName: \"kubernetes.io/projected/58845880-bb82-4fb8-bc7d-19df1f75f136-kube-api-access-xtpd4\") pod \"58845880-bb82-4fb8-bc7d-19df1f75f136\" (UID: \"58845880-bb82-4fb8-bc7d-19df1f75f136\") " Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.199329 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58845880-bb82-4fb8-bc7d-19df1f75f136-utilities" (OuterVolumeSpecName: "utilities") pod "58845880-bb82-4fb8-bc7d-19df1f75f136" (UID: "58845880-bb82-4fb8-bc7d-19df1f75f136"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.206482 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58845880-bb82-4fb8-bc7d-19df1f75f136-kube-api-access-xtpd4" (OuterVolumeSpecName: "kube-api-access-xtpd4") pod "58845880-bb82-4fb8-bc7d-19df1f75f136" (UID: "58845880-bb82-4fb8-bc7d-19df1f75f136"). InnerVolumeSpecName "kube-api-access-xtpd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.251844 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58845880-bb82-4fb8-bc7d-19df1f75f136-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "58845880-bb82-4fb8-bc7d-19df1f75f136" (UID: "58845880-bb82-4fb8-bc7d-19df1f75f136"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.299654 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58845880-bb82-4fb8-bc7d-19df1f75f136-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.299731 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtpd4\" (UniqueName: \"kubernetes.io/projected/58845880-bb82-4fb8-bc7d-19df1f75f136-kube-api-access-xtpd4\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.299749 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58845880-bb82-4fb8-bc7d-19df1f75f136-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.566911 4910 generic.go:334] "Generic (PLEG): container finished" podID="58845880-bb82-4fb8-bc7d-19df1f75f136" containerID="8d482e6ebf9e413ab35912290695e1c105fdedc9cbf626635d3d9d5b9d656eca" exitCode=0 Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.566954 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfp7s" event={"ID":"58845880-bb82-4fb8-bc7d-19df1f75f136","Type":"ContainerDied","Data":"8d482e6ebf9e413ab35912290695e1c105fdedc9cbf626635d3d9d5b9d656eca"} Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.567000 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfp7s" event={"ID":"58845880-bb82-4fb8-bc7d-19df1f75f136","Type":"ContainerDied","Data":"a1e672bb84825fc1e3768b50e90d12321a4f3ba518e95f357e65e0c95ff0131f"} Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.567021 4910 scope.go:117] "RemoveContainer" containerID="8d482e6ebf9e413ab35912290695e1c105fdedc9cbf626635d3d9d5b9d656eca" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.567272 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfp7s" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.593453 4910 scope.go:117] "RemoveContainer" containerID="c12f7776f974bfed580366e247ef5ded8814a81dbb2e101b4d898329a043d492" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.618589 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfp7s"] Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.629615 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfp7s"] Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.639026 4910 scope.go:117] "RemoveContainer" containerID="c9abe605faaa22d205e010de1ee316b76a2a43904ac346406f11abf759924cbb" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.684727 4910 scope.go:117] "RemoveContainer" containerID="8d482e6ebf9e413ab35912290695e1c105fdedc9cbf626635d3d9d5b9d656eca" Jan 05 23:31:52 crc kubenswrapper[4910]: E0105 23:31:52.685389 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d482e6ebf9e413ab35912290695e1c105fdedc9cbf626635d3d9d5b9d656eca\": container with ID starting with 8d482e6ebf9e413ab35912290695e1c105fdedc9cbf626635d3d9d5b9d656eca not found: ID does not exist" containerID="8d482e6ebf9e413ab35912290695e1c105fdedc9cbf626635d3d9d5b9d656eca" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.685425 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d482e6ebf9e413ab35912290695e1c105fdedc9cbf626635d3d9d5b9d656eca"} err="failed to get container status \"8d482e6ebf9e413ab35912290695e1c105fdedc9cbf626635d3d9d5b9d656eca\": rpc error: code = NotFound desc = could not find container \"8d482e6ebf9e413ab35912290695e1c105fdedc9cbf626635d3d9d5b9d656eca\": container with ID starting with 8d482e6ebf9e413ab35912290695e1c105fdedc9cbf626635d3d9d5b9d656eca not found: ID does not exist" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.685452 4910 scope.go:117] "RemoveContainer" containerID="c12f7776f974bfed580366e247ef5ded8814a81dbb2e101b4d898329a043d492" Jan 05 23:31:52 crc kubenswrapper[4910]: E0105 23:31:52.685921 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c12f7776f974bfed580366e247ef5ded8814a81dbb2e101b4d898329a043d492\": container with ID starting with c12f7776f974bfed580366e247ef5ded8814a81dbb2e101b4d898329a043d492 not found: ID does not exist" containerID="c12f7776f974bfed580366e247ef5ded8814a81dbb2e101b4d898329a043d492" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.685975 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c12f7776f974bfed580366e247ef5ded8814a81dbb2e101b4d898329a043d492"} err="failed to get container status \"c12f7776f974bfed580366e247ef5ded8814a81dbb2e101b4d898329a043d492\": rpc error: code = NotFound desc = could not find container \"c12f7776f974bfed580366e247ef5ded8814a81dbb2e101b4d898329a043d492\": container with ID starting with c12f7776f974bfed580366e247ef5ded8814a81dbb2e101b4d898329a043d492 not found: ID does not exist" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.686013 4910 scope.go:117] "RemoveContainer" containerID="c9abe605faaa22d205e010de1ee316b76a2a43904ac346406f11abf759924cbb" Jan 05 23:31:52 crc kubenswrapper[4910]: E0105 23:31:52.686438 4910 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c9abe605faaa22d205e010de1ee316b76a2a43904ac346406f11abf759924cbb\": container with ID starting with c9abe605faaa22d205e010de1ee316b76a2a43904ac346406f11abf759924cbb not found: ID does not exist" containerID="c9abe605faaa22d205e010de1ee316b76a2a43904ac346406f11abf759924cbb" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.686466 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9abe605faaa22d205e010de1ee316b76a2a43904ac346406f11abf759924cbb"} err="failed to get container status \"c9abe605faaa22d205e010de1ee316b76a2a43904ac346406f11abf759924cbb\": rpc error: code = NotFound desc = could not find container \"c9abe605faaa22d205e010de1ee316b76a2a43904ac346406f11abf759924cbb\": container with ID starting with c9abe605faaa22d205e010de1ee316b76a2a43904ac346406f11abf759924cbb not found: ID does not exist" Jan 05 23:31:52 crc kubenswrapper[4910]: I0105 23:31:52.733999 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58845880-bb82-4fb8-bc7d-19df1f75f136" path="/var/lib/kubelet/pods/58845880-bb82-4fb8-bc7d-19df1f75f136/volumes" Jan 05 23:31:53 crc kubenswrapper[4910]: I0105 23:31:53.479769 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-c454dd9b5-768c2" podUID="6b598e3a-4b45-464f-bf19-f029193df5b2" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.114:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.114:8080: connect: connection refused" Jan 05 23:31:53 crc kubenswrapper[4910]: I0105 23:31:53.722841 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:31:53 crc kubenswrapper[4910]: E0105 23:31:53.723165 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:31:54 crc kubenswrapper[4910]: I0105 23:31:54.181318 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:54 crc kubenswrapper[4910]: I0105 23:31:54.231726 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:55 crc kubenswrapper[4910]: I0105 23:31:55.618273 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6gps6"] Jan 05 23:31:55 crc kubenswrapper[4910]: I0105 23:31:55.618624 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6gps6" podUID="d3b67709-a5e7-459a-87b0-d7d266ab60dd" containerName="registry-server" containerID="cri-o://aa0f716ce71de8389b6585a61eae27772e2ac770f5fd0cda0ff4b1f10592fe93" gracePeriod=2 Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.088173 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.174841 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.286927 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3b67709-a5e7-459a-87b0-d7d266ab60dd-catalog-content\") pod \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\" (UID: \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\") " Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.287187 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3b67709-a5e7-459a-87b0-d7d266ab60dd-utilities\") pod \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\" (UID: \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\") " Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.287220 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-485d4\" (UniqueName: \"kubernetes.io/projected/d3b67709-a5e7-459a-87b0-d7d266ab60dd-kube-api-access-485d4\") pod \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\" (UID: \"d3b67709-a5e7-459a-87b0-d7d266ab60dd\") " Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.287619 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3b67709-a5e7-459a-87b0-d7d266ab60dd-utilities" (OuterVolumeSpecName: "utilities") pod "d3b67709-a5e7-459a-87b0-d7d266ab60dd" (UID: "d3b67709-a5e7-459a-87b0-d7d266ab60dd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.288034 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3b67709-a5e7-459a-87b0-d7d266ab60dd-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.293233 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3b67709-a5e7-459a-87b0-d7d266ab60dd-kube-api-access-485d4" (OuterVolumeSpecName: "kube-api-access-485d4") pod "d3b67709-a5e7-459a-87b0-d7d266ab60dd" (UID: "d3b67709-a5e7-459a-87b0-d7d266ab60dd"). InnerVolumeSpecName "kube-api-access-485d4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.389818 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-485d4\" (UniqueName: \"kubernetes.io/projected/d3b67709-a5e7-459a-87b0-d7d266ab60dd-kube-api-access-485d4\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.404919 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3b67709-a5e7-459a-87b0-d7d266ab60dd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d3b67709-a5e7-459a-87b0-d7d266ab60dd" (UID: "d3b67709-a5e7-459a-87b0-d7d266ab60dd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.492222 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3b67709-a5e7-459a-87b0-d7d266ab60dd-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.623165 4910 generic.go:334] "Generic (PLEG): container finished" podID="d3b67709-a5e7-459a-87b0-d7d266ab60dd" containerID="aa0f716ce71de8389b6585a61eae27772e2ac770f5fd0cda0ff4b1f10592fe93" exitCode=0 Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.623216 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gps6" event={"ID":"d3b67709-a5e7-459a-87b0-d7d266ab60dd","Type":"ContainerDied","Data":"aa0f716ce71de8389b6585a61eae27772e2ac770f5fd0cda0ff4b1f10592fe93"} Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.623250 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gps6" event={"ID":"d3b67709-a5e7-459a-87b0-d7d266ab60dd","Type":"ContainerDied","Data":"aae11ec25e7c776a56c1cdc1af899f96f0fadc6ac396811ba992a065d6ae499c"} Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.623274 4910 scope.go:117] "RemoveContainer" containerID="aa0f716ce71de8389b6585a61eae27772e2ac770f5fd0cda0ff4b1f10592fe93" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.623310 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6gps6" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.655586 4910 scope.go:117] "RemoveContainer" containerID="0bab43a594261ee1d947c33a9fdf176c56e83e6212217d67c45dd6d3195d6e30" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.683768 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6gps6"] Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.699474 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6gps6"] Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.705916 4910 scope.go:117] "RemoveContainer" containerID="f47b563c368921c9a61d0315f447127ea1ffc00e9ce791d3f9d9783908f819e7" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.735557 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3b67709-a5e7-459a-87b0-d7d266ab60dd" path="/var/lib/kubelet/pods/d3b67709-a5e7-459a-87b0-d7d266ab60dd/volumes" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.770862 4910 scope.go:117] "RemoveContainer" containerID="aa0f716ce71de8389b6585a61eae27772e2ac770f5fd0cda0ff4b1f10592fe93" Jan 05 23:31:56 crc kubenswrapper[4910]: E0105 23:31:56.771403 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa0f716ce71de8389b6585a61eae27772e2ac770f5fd0cda0ff4b1f10592fe93\": container with ID starting with aa0f716ce71de8389b6585a61eae27772e2ac770f5fd0cda0ff4b1f10592fe93 not found: ID does not exist" containerID="aa0f716ce71de8389b6585a61eae27772e2ac770f5fd0cda0ff4b1f10592fe93" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.771456 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa0f716ce71de8389b6585a61eae27772e2ac770f5fd0cda0ff4b1f10592fe93"} err="failed to get container status \"aa0f716ce71de8389b6585a61eae27772e2ac770f5fd0cda0ff4b1f10592fe93\": rpc error: code = NotFound desc 
= could not find container \"aa0f716ce71de8389b6585a61eae27772e2ac770f5fd0cda0ff4b1f10592fe93\": container with ID starting with aa0f716ce71de8389b6585a61eae27772e2ac770f5fd0cda0ff4b1f10592fe93 not found: ID does not exist" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.771491 4910 scope.go:117] "RemoveContainer" containerID="0bab43a594261ee1d947c33a9fdf176c56e83e6212217d67c45dd6d3195d6e30" Jan 05 23:31:56 crc kubenswrapper[4910]: E0105 23:31:56.771901 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0bab43a594261ee1d947c33a9fdf176c56e83e6212217d67c45dd6d3195d6e30\": container with ID starting with 0bab43a594261ee1d947c33a9fdf176c56e83e6212217d67c45dd6d3195d6e30 not found: ID does not exist" containerID="0bab43a594261ee1d947c33a9fdf176c56e83e6212217d67c45dd6d3195d6e30" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.771955 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bab43a594261ee1d947c33a9fdf176c56e83e6212217d67c45dd6d3195d6e30"} err="failed to get container status \"0bab43a594261ee1d947c33a9fdf176c56e83e6212217d67c45dd6d3195d6e30\": rpc error: code = NotFound desc = could not find container \"0bab43a594261ee1d947c33a9fdf176c56e83e6212217d67c45dd6d3195d6e30\": container with ID starting with 0bab43a594261ee1d947c33a9fdf176c56e83e6212217d67c45dd6d3195d6e30 not found: ID does not exist" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.771970 4910 scope.go:117] "RemoveContainer" containerID="f47b563c368921c9a61d0315f447127ea1ffc00e9ce791d3f9d9783908f819e7" Jan 05 23:31:56 crc kubenswrapper[4910]: E0105 23:31:56.772339 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f47b563c368921c9a61d0315f447127ea1ffc00e9ce791d3f9d9783908f819e7\": container with ID starting with f47b563c368921c9a61d0315f447127ea1ffc00e9ce791d3f9d9783908f819e7 not found: ID does not exist" containerID="f47b563c368921c9a61d0315f447127ea1ffc00e9ce791d3f9d9783908f819e7" Jan 05 23:31:56 crc kubenswrapper[4910]: I0105 23:31:56.772374 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f47b563c368921c9a61d0315f447127ea1ffc00e9ce791d3f9d9783908f819e7"} err="failed to get container status \"f47b563c368921c9a61d0315f447127ea1ffc00e9ce791d3f9d9783908f819e7\": rpc error: code = NotFound desc = could not find container \"f47b563c368921c9a61d0315f447127ea1ffc00e9ce791d3f9d9783908f819e7\": container with ID starting with f47b563c368921c9a61d0315f447127ea1ffc00e9ce791d3f9d9783908f819e7 not found: ID does not exist" Jan 05 23:31:57 crc kubenswrapper[4910]: I0105 23:31:57.745992 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:31:57 crc kubenswrapper[4910]: I0105 23:31:57.860697 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-c454dd9b5-768c2"] Jan 05 23:31:57 crc kubenswrapper[4910]: I0105 23:31:57.860956 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-c454dd9b5-768c2" podUID="6b598e3a-4b45-464f-bf19-f029193df5b2" containerName="horizon-log" containerID="cri-o://df4eb3a4d27f36e935e46bb2126946fc01c14e6bb6e50cb38ee64ca4967d7ada" gracePeriod=30 Jan 05 23:31:57 crc kubenswrapper[4910]: I0105 23:31:57.861483 4910 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/horizon-c454dd9b5-768c2" podUID="6b598e3a-4b45-464f-bf19-f029193df5b2" containerName="horizon" containerID="cri-o://476dc7ee26dd9e0d5b69320a76909889d1e41864a12d7c07102ecd2460981394" gracePeriod=30 Jan 05 23:31:58 crc kubenswrapper[4910]: I0105 23:31:58.657492 4910 generic.go:334] "Generic (PLEG): container finished" podID="6b598e3a-4b45-464f-bf19-f029193df5b2" containerID="476dc7ee26dd9e0d5b69320a76909889d1e41864a12d7c07102ecd2460981394" exitCode=0 Jan 05 23:31:58 crc kubenswrapper[4910]: I0105 23:31:58.657631 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c454dd9b5-768c2" event={"ID":"6b598e3a-4b45-464f-bf19-f029193df5b2","Type":"ContainerDied","Data":"476dc7ee26dd9e0d5b69320a76909889d1e41864a12d7c07102ecd2460981394"} Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.721816 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:32:05 crc kubenswrapper[4910]: E0105 23:32:05.722642 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.755898 4910 generic.go:334] "Generic (PLEG): container finished" podID="f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" containerID="0ff158ca14212e856e6f7fb8b45bd0287ea58054ef34a23d5726d253b6264066" exitCode=137 Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.755926 4910 generic.go:334] "Generic (PLEG): container finished" podID="f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" containerID="a56ac7db0904879bece7f3b7232931532b1c5b680aefe44b5532385b4cdde7fe" exitCode=137 Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.755948 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59cbc8f477-dh8x8" event={"ID":"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f","Type":"ContainerDied","Data":"0ff158ca14212e856e6f7fb8b45bd0287ea58054ef34a23d5726d253b6264066"} Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.755976 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59cbc8f477-dh8x8" event={"ID":"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f","Type":"ContainerDied","Data":"a56ac7db0904879bece7f3b7232931532b1c5b680aefe44b5532385b4cdde7fe"} Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.755988 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59cbc8f477-dh8x8" event={"ID":"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f","Type":"ContainerDied","Data":"73c1e44c28fd96ff732cde70379d4cb8d8f7c3f3e24e5656de636b33d0471cd8"} Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.755998 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73c1e44c28fd96ff732cde70379d4cb8d8f7c3f3e24e5656de636b33d0471cd8" Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.782596 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.823012 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-scripts\") pod \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.824233 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-horizon-secret-key\") pod \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.824275 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdvlz\" (UniqueName: \"kubernetes.io/projected/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-kube-api-access-cdvlz\") pod \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.824339 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-logs\") pod \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.824394 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-config-data\") pod \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\" (UID: \"f48ced9f-d6fe-47aa-a986-c81a48b3cd4f\") " Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.830060 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-logs" (OuterVolumeSpecName: "logs") pod "f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" (UID: "f48ced9f-d6fe-47aa-a986-c81a48b3cd4f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.830790 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" (UID: "f48ced9f-d6fe-47aa-a986-c81a48b3cd4f"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.836573 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-kube-api-access-cdvlz" (OuterVolumeSpecName: "kube-api-access-cdvlz") pod "f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" (UID: "f48ced9f-d6fe-47aa-a986-c81a48b3cd4f"). InnerVolumeSpecName "kube-api-access-cdvlz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.860465 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-scripts" (OuterVolumeSpecName: "scripts") pod "f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" (UID: "f48ced9f-d6fe-47aa-a986-c81a48b3cd4f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.877024 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-config-data" (OuterVolumeSpecName: "config-data") pod "f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" (UID: "f48ced9f-d6fe-47aa-a986-c81a48b3cd4f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.936928 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.936986 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.937004 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.937014 4910 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:05 crc kubenswrapper[4910]: I0105 23:32:05.937032 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdvlz\" (UniqueName: \"kubernetes.io/projected/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f-kube-api-access-cdvlz\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:06 crc kubenswrapper[4910]: I0105 23:32:06.773391 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-59cbc8f477-dh8x8" Jan 05 23:32:06 crc kubenswrapper[4910]: I0105 23:32:06.815562 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-59cbc8f477-dh8x8"] Jan 05 23:32:06 crc kubenswrapper[4910]: I0105 23:32:06.828669 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-59cbc8f477-dh8x8"] Jan 05 23:32:08 crc kubenswrapper[4910]: I0105 23:32:08.736782 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" path="/var/lib/kubelet/pods/f48ced9f-d6fe-47aa-a986-c81a48b3cd4f/volumes" Jan 05 23:32:19 crc kubenswrapper[4910]: I0105 23:32:19.723346 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:32:19 crc kubenswrapper[4910]: E0105 23:32:19.725880 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.038961 4910 generic.go:334] "Generic (PLEG): container finished" podID="6b598e3a-4b45-464f-bf19-f029193df5b2" containerID="df4eb3a4d27f36e935e46bb2126946fc01c14e6bb6e50cb38ee64ca4967d7ada" exitCode=137 Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.039102 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c454dd9b5-768c2" event={"ID":"6b598e3a-4b45-464f-bf19-f029193df5b2","Type":"ContainerDied","Data":"df4eb3a4d27f36e935e46bb2126946fc01c14e6bb6e50cb38ee64ca4967d7ada"} Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.379884 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.524725 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6b598e3a-4b45-464f-bf19-f029193df5b2-horizon-secret-key\") pod \"6b598e3a-4b45-464f-bf19-f029193df5b2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.524889 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b598e3a-4b45-464f-bf19-f029193df5b2-logs\") pod \"6b598e3a-4b45-464f-bf19-f029193df5b2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.526234 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b598e3a-4b45-464f-bf19-f029193df5b2-config-data\") pod \"6b598e3a-4b45-464f-bf19-f029193df5b2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.526387 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l79v6\" (UniqueName: \"kubernetes.io/projected/6b598e3a-4b45-464f-bf19-f029193df5b2-kube-api-access-l79v6\") pod \"6b598e3a-4b45-464f-bf19-f029193df5b2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.526431 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6b598e3a-4b45-464f-bf19-f029193df5b2-scripts\") pod \"6b598e3a-4b45-464f-bf19-f029193df5b2\" (UID: \"6b598e3a-4b45-464f-bf19-f029193df5b2\") " Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.527369 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b598e3a-4b45-464f-bf19-f029193df5b2-logs" (OuterVolumeSpecName: "logs") pod "6b598e3a-4b45-464f-bf19-f029193df5b2" (UID: "6b598e3a-4b45-464f-bf19-f029193df5b2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.530805 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b598e3a-4b45-464f-bf19-f029193df5b2-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "6b598e3a-4b45-464f-bf19-f029193df5b2" (UID: "6b598e3a-4b45-464f-bf19-f029193df5b2"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.537330 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b598e3a-4b45-464f-bf19-f029193df5b2-kube-api-access-l79v6" (OuterVolumeSpecName: "kube-api-access-l79v6") pod "6b598e3a-4b45-464f-bf19-f029193df5b2" (UID: "6b598e3a-4b45-464f-bf19-f029193df5b2"). InnerVolumeSpecName "kube-api-access-l79v6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.552457 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b598e3a-4b45-464f-bf19-f029193df5b2-scripts" (OuterVolumeSpecName: "scripts") pod "6b598e3a-4b45-464f-bf19-f029193df5b2" (UID: "6b598e3a-4b45-464f-bf19-f029193df5b2"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.574281 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b598e3a-4b45-464f-bf19-f029193df5b2-config-data" (OuterVolumeSpecName: "config-data") pod "6b598e3a-4b45-464f-bf19-f029193df5b2" (UID: "6b598e3a-4b45-464f-bf19-f029193df5b2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.628793 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l79v6\" (UniqueName: \"kubernetes.io/projected/6b598e3a-4b45-464f-bf19-f029193df5b2-kube-api-access-l79v6\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.628826 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6b598e3a-4b45-464f-bf19-f029193df5b2-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.628836 4910 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6b598e3a-4b45-464f-bf19-f029193df5b2-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.628844 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b598e3a-4b45-464f-bf19-f029193df5b2-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:28 crc kubenswrapper[4910]: I0105 23:32:28.628853 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b598e3a-4b45-464f-bf19-f029193df5b2-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:29 crc kubenswrapper[4910]: I0105 23:32:29.055110 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c454dd9b5-768c2" event={"ID":"6b598e3a-4b45-464f-bf19-f029193df5b2","Type":"ContainerDied","Data":"f23bea47b1c56f73aae769a8f3eee530fd0ab536ffc2c3421fc0ccde82230e34"} Jan 05 23:32:29 crc kubenswrapper[4910]: I0105 23:32:29.055241 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-c454dd9b5-768c2" Jan 05 23:32:29 crc kubenswrapper[4910]: I0105 23:32:29.055277 4910 scope.go:117] "RemoveContainer" containerID="476dc7ee26dd9e0d5b69320a76909889d1e41864a12d7c07102ecd2460981394" Jan 05 23:32:29 crc kubenswrapper[4910]: I0105 23:32:29.095876 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-c454dd9b5-768c2"] Jan 05 23:32:29 crc kubenswrapper[4910]: I0105 23:32:29.111735 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-c454dd9b5-768c2"] Jan 05 23:32:29 crc kubenswrapper[4910]: I0105 23:32:29.347176 4910 scope.go:117] "RemoveContainer" containerID="df4eb3a4d27f36e935e46bb2126946fc01c14e6bb6e50cb38ee64ca4967d7ada" Jan 05 23:32:30 crc kubenswrapper[4910]: I0105 23:32:30.748325 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b598e3a-4b45-464f-bf19-f029193df5b2" path="/var/lib/kubelet/pods/6b598e3a-4b45-464f-bf19-f029193df5b2/volumes" Jan 05 23:32:32 crc kubenswrapper[4910]: I0105 23:32:32.076962 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-gzbt7"] Jan 05 23:32:32 crc kubenswrapper[4910]: I0105 23:32:32.100245 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-3695-account-create-update-dlx7s"] Jan 05 23:32:32 crc kubenswrapper[4910]: I0105 23:32:32.113035 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-gzbt7"] Jan 05 23:32:32 crc kubenswrapper[4910]: I0105 23:32:32.123736 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-3695-account-create-update-dlx7s"] Jan 05 23:32:32 crc kubenswrapper[4910]: I0105 23:32:32.723070 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:32:32 crc kubenswrapper[4910]: E0105 23:32:32.723602 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:32:32 crc kubenswrapper[4910]: I0105 23:32:32.738251 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1" path="/var/lib/kubelet/pods/1e5d0bca-1969-4a5e-bd7d-c07c5fa1a7f1/volumes" Jan 05 23:32:32 crc kubenswrapper[4910]: I0105 23:32:32.739043 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc30a657-c8fd-41db-a82d-fd41a721b4d7" path="/var/lib/kubelet/pods/cc30a657-c8fd-41db-a82d-fd41a721b4d7/volumes" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.043598 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-skmcr"] Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.064032 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-skmcr"] Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.170934 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-68557c6fd9-rbl2l"] Jan 05 23:32:41 crc kubenswrapper[4910]: E0105 23:32:41.171349 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58845880-bb82-4fb8-bc7d-19df1f75f136" containerName="extract-content" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 
23:32:41.171365 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="58845880-bb82-4fb8-bc7d-19df1f75f136" containerName="extract-content" Jan 05 23:32:41 crc kubenswrapper[4910]: E0105 23:32:41.171376 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58845880-bb82-4fb8-bc7d-19df1f75f136" containerName="registry-server" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171401 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="58845880-bb82-4fb8-bc7d-19df1f75f136" containerName="registry-server" Jan 05 23:32:41 crc kubenswrapper[4910]: E0105 23:32:41.171414 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b598e3a-4b45-464f-bf19-f029193df5b2" containerName="horizon" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171420 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b598e3a-4b45-464f-bf19-f029193df5b2" containerName="horizon" Jan 05 23:32:41 crc kubenswrapper[4910]: E0105 23:32:41.171433 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" containerName="horizon" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171440 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" containerName="horizon" Jan 05 23:32:41 crc kubenswrapper[4910]: E0105 23:32:41.171454 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" containerName="horizon-log" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171459 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" containerName="horizon-log" Jan 05 23:32:41 crc kubenswrapper[4910]: E0105 23:32:41.171478 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3b67709-a5e7-459a-87b0-d7d266ab60dd" containerName="extract-content" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171486 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3b67709-a5e7-459a-87b0-d7d266ab60dd" containerName="extract-content" Jan 05 23:32:41 crc kubenswrapper[4910]: E0105 23:32:41.171494 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58845880-bb82-4fb8-bc7d-19df1f75f136" containerName="extract-utilities" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171500 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="58845880-bb82-4fb8-bc7d-19df1f75f136" containerName="extract-utilities" Jan 05 23:32:41 crc kubenswrapper[4910]: E0105 23:32:41.171511 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b598e3a-4b45-464f-bf19-f029193df5b2" containerName="horizon-log" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171517 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b598e3a-4b45-464f-bf19-f029193df5b2" containerName="horizon-log" Jan 05 23:32:41 crc kubenswrapper[4910]: E0105 23:32:41.171530 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3b67709-a5e7-459a-87b0-d7d266ab60dd" containerName="registry-server" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171535 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3b67709-a5e7-459a-87b0-d7d266ab60dd" containerName="registry-server" Jan 05 23:32:41 crc kubenswrapper[4910]: E0105 23:32:41.171549 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3b67709-a5e7-459a-87b0-d7d266ab60dd" containerName="extract-utilities" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171555 4910 
state_mem.go:107] "Deleted CPUSet assignment" podUID="d3b67709-a5e7-459a-87b0-d7d266ab60dd" containerName="extract-utilities" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171736 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" containerName="horizon-log" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171749 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b598e3a-4b45-464f-bf19-f029193df5b2" containerName="horizon-log" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171763 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f48ced9f-d6fe-47aa-a986-c81a48b3cd4f" containerName="horizon" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171773 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="58845880-bb82-4fb8-bc7d-19df1f75f136" containerName="registry-server" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171785 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3b67709-a5e7-459a-87b0-d7d266ab60dd" containerName="registry-server" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.171792 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b598e3a-4b45-464f-bf19-f029193df5b2" containerName="horizon" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.172751 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.235367 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68557c6fd9-rbl2l"] Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.266206 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-scripts\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.266315 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-config-data\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.266376 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c22qc\" (UniqueName: \"kubernetes.io/projected/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-kube-api-access-c22qc\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.266427 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-logs\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.266481 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-horizon-secret-key\") pod \"horizon-68557c6fd9-rbl2l\" (UID: 
\"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.368417 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-logs\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.368488 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-horizon-secret-key\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.368560 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-scripts\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.368640 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-config-data\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.368716 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c22qc\" (UniqueName: \"kubernetes.io/projected/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-kube-api-access-c22qc\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.368968 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-logs\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.369477 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-scripts\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.370602 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-config-data\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.377439 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-horizon-secret-key\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.386547 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-c22qc\" (UniqueName: \"kubernetes.io/projected/8f15c3c0-9df1-4999-80e0-d3ebd88a76a8-kube-api-access-c22qc\") pod \"horizon-68557c6fd9-rbl2l\" (UID: \"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8\") " pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.500312 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68557c6fd9-rbl2l" Jan 05 23:32:41 crc kubenswrapper[4910]: I0105 23:32:41.940401 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68557c6fd9-rbl2l"] Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.240808 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68557c6fd9-rbl2l" event={"ID":"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8","Type":"ContainerStarted","Data":"c9f9bee39675c13822cda0616215fe6814161493273f880d4ff63d1311c75e34"} Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.240853 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68557c6fd9-rbl2l" event={"ID":"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8","Type":"ContainerStarted","Data":"f6907cf8f586a2609e6a5b6d9e1f7f6ae6010b29501fff838d3be5379c038c6f"} Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.720473 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-tvm6d"] Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.722680 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-tvm6d" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.743399 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278" path="/var/lib/kubelet/pods/fb4c9bcc-7a5a-4e3d-b8af-a16560ab2278/volumes" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.744163 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-tvm6d"] Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.791100 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-2888-account-create-update-5gtwz"] Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.792491 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-2888-account-create-update-5gtwz" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.795758 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.796943 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzz5k\" (UniqueName: \"kubernetes.io/projected/ac8e2000-282c-4602-b740-b834d9d58e0f-kube-api-access-bzz5k\") pod \"heat-2888-account-create-update-5gtwz\" (UID: \"ac8e2000-282c-4602-b740-b834d9d58e0f\") " pod="openstack/heat-2888-account-create-update-5gtwz" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.797004 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac8e2000-282c-4602-b740-b834d9d58e0f-operator-scripts\") pod \"heat-2888-account-create-update-5gtwz\" (UID: \"ac8e2000-282c-4602-b740-b834d9d58e0f\") " pod="openstack/heat-2888-account-create-update-5gtwz" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.797029 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11faec61-a084-4bfb-b9b4-06fe57b34754-operator-scripts\") pod \"heat-db-create-tvm6d\" (UID: \"11faec61-a084-4bfb-b9b4-06fe57b34754\") " pod="openstack/heat-db-create-tvm6d" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.797102 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2zth\" (UniqueName: \"kubernetes.io/projected/11faec61-a084-4bfb-b9b4-06fe57b34754-kube-api-access-t2zth\") pod \"heat-db-create-tvm6d\" (UID: \"11faec61-a084-4bfb-b9b4-06fe57b34754\") " pod="openstack/heat-db-create-tvm6d" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.798415 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-2888-account-create-update-5gtwz"] Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.898905 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11faec61-a084-4bfb-b9b4-06fe57b34754-operator-scripts\") pod \"heat-db-create-tvm6d\" (UID: \"11faec61-a084-4bfb-b9b4-06fe57b34754\") " pod="openstack/heat-db-create-tvm6d" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.898954 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2zth\" (UniqueName: \"kubernetes.io/projected/11faec61-a084-4bfb-b9b4-06fe57b34754-kube-api-access-t2zth\") pod \"heat-db-create-tvm6d\" (UID: \"11faec61-a084-4bfb-b9b4-06fe57b34754\") " pod="openstack/heat-db-create-tvm6d" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.899105 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzz5k\" (UniqueName: \"kubernetes.io/projected/ac8e2000-282c-4602-b740-b834d9d58e0f-kube-api-access-bzz5k\") pod \"heat-2888-account-create-update-5gtwz\" (UID: \"ac8e2000-282c-4602-b740-b834d9d58e0f\") " pod="openstack/heat-2888-account-create-update-5gtwz" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.899170 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac8e2000-282c-4602-b740-b834d9d58e0f-operator-scripts\") pod 
\"heat-2888-account-create-update-5gtwz\" (UID: \"ac8e2000-282c-4602-b740-b834d9d58e0f\") " pod="openstack/heat-2888-account-create-update-5gtwz" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.899819 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11faec61-a084-4bfb-b9b4-06fe57b34754-operator-scripts\") pod \"heat-db-create-tvm6d\" (UID: \"11faec61-a084-4bfb-b9b4-06fe57b34754\") " pod="openstack/heat-db-create-tvm6d" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.899890 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac8e2000-282c-4602-b740-b834d9d58e0f-operator-scripts\") pod \"heat-2888-account-create-update-5gtwz\" (UID: \"ac8e2000-282c-4602-b740-b834d9d58e0f\") " pod="openstack/heat-2888-account-create-update-5gtwz" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.917977 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzz5k\" (UniqueName: \"kubernetes.io/projected/ac8e2000-282c-4602-b740-b834d9d58e0f-kube-api-access-bzz5k\") pod \"heat-2888-account-create-update-5gtwz\" (UID: \"ac8e2000-282c-4602-b740-b834d9d58e0f\") " pod="openstack/heat-2888-account-create-update-5gtwz" Jan 05 23:32:42 crc kubenswrapper[4910]: I0105 23:32:42.918820 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2zth\" (UniqueName: \"kubernetes.io/projected/11faec61-a084-4bfb-b9b4-06fe57b34754-kube-api-access-t2zth\") pod \"heat-db-create-tvm6d\" (UID: \"11faec61-a084-4bfb-b9b4-06fe57b34754\") " pod="openstack/heat-db-create-tvm6d" Jan 05 23:32:43 crc kubenswrapper[4910]: I0105 23:32:43.062390 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-tvm6d" Jan 05 23:32:43 crc kubenswrapper[4910]: I0105 23:32:43.115659 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-2888-account-create-update-5gtwz" Jan 05 23:32:43 crc kubenswrapper[4910]: I0105 23:32:43.322892 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68557c6fd9-rbl2l" event={"ID":"8f15c3c0-9df1-4999-80e0-d3ebd88a76a8","Type":"ContainerStarted","Data":"e1fcc332bdb47dd45f513bd9e5e0387e524cc4facfb163c7bb0fab27273356bf"} Jan 05 23:32:43 crc kubenswrapper[4910]: I0105 23:32:43.342853 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-68557c6fd9-rbl2l" podStartSLOduration=2.342837203 podStartE2EDuration="2.342837203s" podCreationTimestamp="2026-01-05 23:32:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:32:43.339567755 +0000 UTC m=+6094.917065425" watchObservedRunningTime="2026-01-05 23:32:43.342837203 +0000 UTC m=+6094.920334873" Jan 05 23:32:43 crc kubenswrapper[4910]: I0105 23:32:43.623268 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-tvm6d"] Jan 05 23:32:43 crc kubenswrapper[4910]: W0105 23:32:43.631967 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod11faec61_a084_4bfb_b9b4_06fe57b34754.slice/crio-f6ee68fc21083978f3b8534462bd0449b725d54bd06f144a6aecb806aa07bfe1 WatchSource:0}: Error finding container f6ee68fc21083978f3b8534462bd0449b725d54bd06f144a6aecb806aa07bfe1: Status 404 returned error can't find the container with id f6ee68fc21083978f3b8534462bd0449b725d54bd06f144a6aecb806aa07bfe1 Jan 05 23:32:43 crc kubenswrapper[4910]: W0105 23:32:43.714025 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac8e2000_282c_4602_b740_b834d9d58e0f.slice/crio-618571e300bd79efe9eb9892f420c43e4a042971924540add939bdd3cdbfbbfe WatchSource:0}: Error finding container 618571e300bd79efe9eb9892f420c43e4a042971924540add939bdd3cdbfbbfe: Status 404 returned error can't find the container with id 618571e300bd79efe9eb9892f420c43e4a042971924540add939bdd3cdbfbbfe Jan 05 23:32:43 crc kubenswrapper[4910]: I0105 23:32:43.714422 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-2888-account-create-update-5gtwz"] Jan 05 23:32:44 crc kubenswrapper[4910]: I0105 23:32:44.334643 4910 generic.go:334] "Generic (PLEG): container finished" podID="ac8e2000-282c-4602-b740-b834d9d58e0f" containerID="321b41a21f66f4581c85633a8ffcc12bc2af4ece73e8d4ba62c4dbb4ff7d49e0" exitCode=0 Jan 05 23:32:44 crc kubenswrapper[4910]: I0105 23:32:44.335048 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-2888-account-create-update-5gtwz" event={"ID":"ac8e2000-282c-4602-b740-b834d9d58e0f","Type":"ContainerDied","Data":"321b41a21f66f4581c85633a8ffcc12bc2af4ece73e8d4ba62c4dbb4ff7d49e0"} Jan 05 23:32:44 crc kubenswrapper[4910]: I0105 23:32:44.335092 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-2888-account-create-update-5gtwz" event={"ID":"ac8e2000-282c-4602-b740-b834d9d58e0f","Type":"ContainerStarted","Data":"618571e300bd79efe9eb9892f420c43e4a042971924540add939bdd3cdbfbbfe"} Jan 05 23:32:44 crc kubenswrapper[4910]: I0105 23:32:44.337180 4910 generic.go:334] "Generic (PLEG): container finished" podID="11faec61-a084-4bfb-b9b4-06fe57b34754" containerID="37be7f31573ec3ed5152eeef8f5e33a9c5ad62f9746fc5cf9ea8ea3f39207f7f" exitCode=0 Jan 05 23:32:44 crc 
kubenswrapper[4910]: I0105 23:32:44.338384 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-tvm6d" event={"ID":"11faec61-a084-4bfb-b9b4-06fe57b34754","Type":"ContainerDied","Data":"37be7f31573ec3ed5152eeef8f5e33a9c5ad62f9746fc5cf9ea8ea3f39207f7f"} Jan 05 23:32:44 crc kubenswrapper[4910]: I0105 23:32:44.338428 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-tvm6d" event={"ID":"11faec61-a084-4bfb-b9b4-06fe57b34754","Type":"ContainerStarted","Data":"f6ee68fc21083978f3b8534462bd0449b725d54bd06f144a6aecb806aa07bfe1"} Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:45.890682 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-2888-account-create-update-5gtwz" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:45.897141 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-tvm6d" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:45.976051 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11faec61-a084-4bfb-b9b4-06fe57b34754-operator-scripts\") pod \"11faec61-a084-4bfb-b9b4-06fe57b34754\" (UID: \"11faec61-a084-4bfb-b9b4-06fe57b34754\") " Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:45.976202 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzz5k\" (UniqueName: \"kubernetes.io/projected/ac8e2000-282c-4602-b740-b834d9d58e0f-kube-api-access-bzz5k\") pod \"ac8e2000-282c-4602-b740-b834d9d58e0f\" (UID: \"ac8e2000-282c-4602-b740-b834d9d58e0f\") " Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:45.976256 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac8e2000-282c-4602-b740-b834d9d58e0f-operator-scripts\") pod \"ac8e2000-282c-4602-b740-b834d9d58e0f\" (UID: \"ac8e2000-282c-4602-b740-b834d9d58e0f\") " Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:45.976342 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2zth\" (UniqueName: \"kubernetes.io/projected/11faec61-a084-4bfb-b9b4-06fe57b34754-kube-api-access-t2zth\") pod \"11faec61-a084-4bfb-b9b4-06fe57b34754\" (UID: \"11faec61-a084-4bfb-b9b4-06fe57b34754\") " Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:45.979419 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac8e2000-282c-4602-b740-b834d9d58e0f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ac8e2000-282c-4602-b740-b834d9d58e0f" (UID: "ac8e2000-282c-4602-b740-b834d9d58e0f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:45.979830 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11faec61-a084-4bfb-b9b4-06fe57b34754-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "11faec61-a084-4bfb-b9b4-06fe57b34754" (UID: "11faec61-a084-4bfb-b9b4-06fe57b34754"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:45.994441 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11faec61-a084-4bfb-b9b4-06fe57b34754-kube-api-access-t2zth" (OuterVolumeSpecName: "kube-api-access-t2zth") pod "11faec61-a084-4bfb-b9b4-06fe57b34754" (UID: "11faec61-a084-4bfb-b9b4-06fe57b34754"). InnerVolumeSpecName "kube-api-access-t2zth". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:46.000297 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac8e2000-282c-4602-b740-b834d9d58e0f-kube-api-access-bzz5k" (OuterVolumeSpecName: "kube-api-access-bzz5k") pod "ac8e2000-282c-4602-b740-b834d9d58e0f" (UID: "ac8e2000-282c-4602-b740-b834d9d58e0f"). InnerVolumeSpecName "kube-api-access-bzz5k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:46.077608 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11faec61-a084-4bfb-b9b4-06fe57b34754-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:46.077639 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzz5k\" (UniqueName: \"kubernetes.io/projected/ac8e2000-282c-4602-b740-b834d9d58e0f-kube-api-access-bzz5k\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:46.077649 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac8e2000-282c-4602-b740-b834d9d58e0f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:46.077657 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2zth\" (UniqueName: \"kubernetes.io/projected/11faec61-a084-4bfb-b9b4-06fe57b34754-kube-api-access-t2zth\") on node \"crc\" DevicePath \"\"" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:46.358661 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-tvm6d" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:46.358659 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-tvm6d" event={"ID":"11faec61-a084-4bfb-b9b4-06fe57b34754","Type":"ContainerDied","Data":"f6ee68fc21083978f3b8534462bd0449b725d54bd06f144a6aecb806aa07bfe1"} Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:46.358825 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f6ee68fc21083978f3b8534462bd0449b725d54bd06f144a6aecb806aa07bfe1" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:46.360058 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-2888-account-create-update-5gtwz" event={"ID":"ac8e2000-282c-4602-b740-b834d9d58e0f","Type":"ContainerDied","Data":"618571e300bd79efe9eb9892f420c43e4a042971924540add939bdd3cdbfbbfe"} Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:46.360097 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="618571e300bd79efe9eb9892f420c43e4a042971924540add939bdd3cdbfbbfe" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:46.360184 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-2888-account-create-update-5gtwz" Jan 05 23:32:46 crc kubenswrapper[4910]: I0105 23:32:46.721721 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:32:46 crc kubenswrapper[4910]: E0105 23:32:46.721994 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.033187 4910 scope.go:117] "RemoveContainer" containerID="01c68606b8dab6983f3942011551c6db4a6476ce71faad0d3363dfb91f7b354a" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.083548 4910 scope.go:117] "RemoveContainer" containerID="104c3a4d156d738369debee93e3a01cea118fe5a08e323bccbacb1b2e82e3840" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.145253 4910 scope.go:117] "RemoveContainer" containerID="a7eb2dea7c54212684db87b3c5d5a14b0740f3d86b797d329dbafe88ddb27fb2" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.173110 4910 scope.go:117] "RemoveContainer" containerID="5e0732739ff1284f5f8e1dd3ae708133804fcfe53b1dfecb1053fed6370e748c" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.894365 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-bxg5l"] Jan 05 23:32:47 crc kubenswrapper[4910]: E0105 23:32:47.894906 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac8e2000-282c-4602-b740-b834d9d58e0f" containerName="mariadb-account-create-update" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.894922 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac8e2000-282c-4602-b740-b834d9d58e0f" containerName="mariadb-account-create-update" Jan 05 23:32:47 crc kubenswrapper[4910]: E0105 23:32:47.894977 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11faec61-a084-4bfb-b9b4-06fe57b34754" containerName="mariadb-database-create" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.894986 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="11faec61-a084-4bfb-b9b4-06fe57b34754" containerName="mariadb-database-create" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.895276 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="11faec61-a084-4bfb-b9b4-06fe57b34754" containerName="mariadb-database-create" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.895302 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac8e2000-282c-4602-b740-b834d9d58e0f" containerName="mariadb-account-create-update" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.896195 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-bxg5l" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.912298 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-bxg5l"] Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.913024 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.915800 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-57rvc" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.921639 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72257aab-c18e-432e-a662-73955418e381-combined-ca-bundle\") pod \"heat-db-sync-bxg5l\" (UID: \"72257aab-c18e-432e-a662-73955418e381\") " pod="openstack/heat-db-sync-bxg5l" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.921710 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54s4r\" (UniqueName: \"kubernetes.io/projected/72257aab-c18e-432e-a662-73955418e381-kube-api-access-54s4r\") pod \"heat-db-sync-bxg5l\" (UID: \"72257aab-c18e-432e-a662-73955418e381\") " pod="openstack/heat-db-sync-bxg5l" Jan 05 23:32:47 crc kubenswrapper[4910]: I0105 23:32:47.921821 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72257aab-c18e-432e-a662-73955418e381-config-data\") pod \"heat-db-sync-bxg5l\" (UID: \"72257aab-c18e-432e-a662-73955418e381\") " pod="openstack/heat-db-sync-bxg5l" Jan 05 23:32:48 crc kubenswrapper[4910]: I0105 23:32:48.024719 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72257aab-c18e-432e-a662-73955418e381-config-data\") pod \"heat-db-sync-bxg5l\" (UID: \"72257aab-c18e-432e-a662-73955418e381\") " pod="openstack/heat-db-sync-bxg5l" Jan 05 23:32:48 crc kubenswrapper[4910]: I0105 23:32:48.024937 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72257aab-c18e-432e-a662-73955418e381-combined-ca-bundle\") pod \"heat-db-sync-bxg5l\" (UID: \"72257aab-c18e-432e-a662-73955418e381\") " pod="openstack/heat-db-sync-bxg5l" Jan 05 23:32:48 crc kubenswrapper[4910]: I0105 23:32:48.024970 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54s4r\" (UniqueName: \"kubernetes.io/projected/72257aab-c18e-432e-a662-73955418e381-kube-api-access-54s4r\") pod \"heat-db-sync-bxg5l\" (UID: \"72257aab-c18e-432e-a662-73955418e381\") " pod="openstack/heat-db-sync-bxg5l" Jan 05 23:32:48 crc kubenswrapper[4910]: I0105 23:32:48.048080 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72257aab-c18e-432e-a662-73955418e381-combined-ca-bundle\") pod \"heat-db-sync-bxg5l\" (UID: \"72257aab-c18e-432e-a662-73955418e381\") " pod="openstack/heat-db-sync-bxg5l" Jan 05 23:32:48 crc kubenswrapper[4910]: I0105 23:32:48.060258 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72257aab-c18e-432e-a662-73955418e381-config-data\") pod \"heat-db-sync-bxg5l\" (UID: \"72257aab-c18e-432e-a662-73955418e381\") " pod="openstack/heat-db-sync-bxg5l" 
Jan 05 23:32:48 crc kubenswrapper[4910]: I0105 23:32:48.107671 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54s4r\" (UniqueName: \"kubernetes.io/projected/72257aab-c18e-432e-a662-73955418e381-kube-api-access-54s4r\") pod \"heat-db-sync-bxg5l\" (UID: \"72257aab-c18e-432e-a662-73955418e381\") " pod="openstack/heat-db-sync-bxg5l"
Jan 05 23:32:48 crc kubenswrapper[4910]: I0105 23:32:48.231460 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-bxg5l"
Jan 05 23:32:48 crc kubenswrapper[4910]: I0105 23:32:48.718412 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-bxg5l"]
Jan 05 23:32:49 crc kubenswrapper[4910]: I0105 23:32:49.402102 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-bxg5l" event={"ID":"72257aab-c18e-432e-a662-73955418e381","Type":"ContainerStarted","Data":"a3f00b01b7ad3318936f0a07a9023e5abe0d5315a0e3093aa49a307f65625da7"}
Jan 05 23:32:51 crc kubenswrapper[4910]: I0105 23:32:51.500592 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-68557c6fd9-rbl2l"
Jan 05 23:32:51 crc kubenswrapper[4910]: I0105 23:32:51.500974 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-68557c6fd9-rbl2l"
Jan 05 23:32:55 crc kubenswrapper[4910]: I0105 23:32:55.462433 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-bxg5l" event={"ID":"72257aab-c18e-432e-a662-73955418e381","Type":"ContainerStarted","Data":"d98e24434f203249285386fb775c8a0b61c19518fffb2cd9c4906a140a466cf3"}
Jan 05 23:32:55 crc kubenswrapper[4910]: I0105 23:32:55.487423 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-bxg5l" podStartSLOduration=2.505985049 podStartE2EDuration="8.487402519s" podCreationTimestamp="2026-01-05 23:32:47 +0000 UTC" firstStartedPulling="2026-01-05 23:32:48.720628312 +0000 UTC m=+6100.298126012" lastFinishedPulling="2026-01-05 23:32:54.702045812 +0000 UTC m=+6106.279543482" observedRunningTime="2026-01-05 23:32:55.485289418 +0000 UTC m=+6107.062787088" watchObservedRunningTime="2026-01-05 23:32:55.487402519 +0000 UTC m=+6107.064900199"
Jan 05 23:32:57 crc kubenswrapper[4910]: I0105 23:32:57.487775 4910 generic.go:334] "Generic (PLEG): container finished" podID="72257aab-c18e-432e-a662-73955418e381" containerID="d98e24434f203249285386fb775c8a0b61c19518fffb2cd9c4906a140a466cf3" exitCode=0
Jan 05 23:32:57 crc kubenswrapper[4910]: I0105 23:32:57.487906 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-bxg5l" event={"ID":"72257aab-c18e-432e-a662-73955418e381","Type":"ContainerDied","Data":"d98e24434f203249285386fb775c8a0b61c19518fffb2cd9c4906a140a466cf3"}
Jan 05 23:32:58 crc kubenswrapper[4910]: I0105 23:32:58.952268 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-bxg5l"
Jan 05 23:32:59 crc kubenswrapper[4910]: I0105 23:32:59.029526 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72257aab-c18e-432e-a662-73955418e381-config-data\") pod \"72257aab-c18e-432e-a662-73955418e381\" (UID: \"72257aab-c18e-432e-a662-73955418e381\") "
Jan 05 23:32:59 crc kubenswrapper[4910]: I0105 23:32:59.029736 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72257aab-c18e-432e-a662-73955418e381-combined-ca-bundle\") pod \"72257aab-c18e-432e-a662-73955418e381\" (UID: \"72257aab-c18e-432e-a662-73955418e381\") "
Jan 05 23:32:59 crc kubenswrapper[4910]: I0105 23:32:59.029809 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54s4r\" (UniqueName: \"kubernetes.io/projected/72257aab-c18e-432e-a662-73955418e381-kube-api-access-54s4r\") pod \"72257aab-c18e-432e-a662-73955418e381\" (UID: \"72257aab-c18e-432e-a662-73955418e381\") "
Jan 05 23:32:59 crc kubenswrapper[4910]: I0105 23:32:59.038526 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72257aab-c18e-432e-a662-73955418e381-kube-api-access-54s4r" (OuterVolumeSpecName: "kube-api-access-54s4r") pod "72257aab-c18e-432e-a662-73955418e381" (UID: "72257aab-c18e-432e-a662-73955418e381"). InnerVolumeSpecName "kube-api-access-54s4r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 23:32:59 crc kubenswrapper[4910]: I0105 23:32:59.067404 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72257aab-c18e-432e-a662-73955418e381-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "72257aab-c18e-432e-a662-73955418e381" (UID: "72257aab-c18e-432e-a662-73955418e381"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 23:32:59 crc kubenswrapper[4910]: I0105 23:32:59.127678 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72257aab-c18e-432e-a662-73955418e381-config-data" (OuterVolumeSpecName: "config-data") pod "72257aab-c18e-432e-a662-73955418e381" (UID: "72257aab-c18e-432e-a662-73955418e381"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 05 23:32:59 crc kubenswrapper[4910]: I0105 23:32:59.133069 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72257aab-c18e-432e-a662-73955418e381-config-data\") on node \"crc\" DevicePath \"\""
Jan 05 23:32:59 crc kubenswrapper[4910]: I0105 23:32:59.133104 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72257aab-c18e-432e-a662-73955418e381-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 05 23:32:59 crc kubenswrapper[4910]: I0105 23:32:59.133131 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54s4r\" (UniqueName: \"kubernetes.io/projected/72257aab-c18e-432e-a662-73955418e381-kube-api-access-54s4r\") on node \"crc\" DevicePath \"\""
Jan 05 23:32:59 crc kubenswrapper[4910]: I0105 23:32:59.531877 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-bxg5l" event={"ID":"72257aab-c18e-432e-a662-73955418e381","Type":"ContainerDied","Data":"a3f00b01b7ad3318936f0a07a9023e5abe0d5315a0e3093aa49a307f65625da7"}
Jan 05 23:32:59 crc kubenswrapper[4910]: I0105 23:32:59.531915 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a3f00b01b7ad3318936f0a07a9023e5abe0d5315a0e3093aa49a307f65625da7"
Jan 05 23:32:59 crc kubenswrapper[4910]: I0105 23:32:59.531979 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-bxg5l"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.679811 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-57bb7c69d4-76xm4"]
Jan 05 23:33:00 crc kubenswrapper[4910]: E0105 23:33:00.683667 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72257aab-c18e-432e-a662-73955418e381" containerName="heat-db-sync"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.683739 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="72257aab-c18e-432e-a662-73955418e381" containerName="heat-db-sync"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.684344 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="72257aab-c18e-432e-a662-73955418e381" containerName="heat-db-sync"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.685931 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.688061 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-57rvc"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.688439 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.688678 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.693642 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-57bb7c69d4-76xm4"]
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.771586 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70c09a40-83e9-4f29-8718-26e434fd2935-config-data-custom\") pod \"heat-engine-57bb7c69d4-76xm4\" (UID: \"70c09a40-83e9-4f29-8718-26e434fd2935\") " pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.771664 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70c09a40-83e9-4f29-8718-26e434fd2935-combined-ca-bundle\") pod \"heat-engine-57bb7c69d4-76xm4\" (UID: \"70c09a40-83e9-4f29-8718-26e434fd2935\") " pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.771746 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlphl\" (UniqueName: \"kubernetes.io/projected/70c09a40-83e9-4f29-8718-26e434fd2935-kube-api-access-jlphl\") pod \"heat-engine-57bb7c69d4-76xm4\" (UID: \"70c09a40-83e9-4f29-8718-26e434fd2935\") " pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.771796 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70c09a40-83e9-4f29-8718-26e434fd2935-config-data\") pod \"heat-engine-57bb7c69d4-76xm4\" (UID: \"70c09a40-83e9-4f29-8718-26e434fd2935\") " pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.874769 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlphl\" (UniqueName: \"kubernetes.io/projected/70c09a40-83e9-4f29-8718-26e434fd2935-kube-api-access-jlphl\") pod \"heat-engine-57bb7c69d4-76xm4\" (UID: \"70c09a40-83e9-4f29-8718-26e434fd2935\") " pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.874844 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70c09a40-83e9-4f29-8718-26e434fd2935-config-data\") pod \"heat-engine-57bb7c69d4-76xm4\" (UID: \"70c09a40-83e9-4f29-8718-26e434fd2935\") " pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.874956 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70c09a40-83e9-4f29-8718-26e434fd2935-config-data-custom\") pod \"heat-engine-57bb7c69d4-76xm4\" (UID: \"70c09a40-83e9-4f29-8718-26e434fd2935\") " pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.875027 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70c09a40-83e9-4f29-8718-26e434fd2935-combined-ca-bundle\") pod \"heat-engine-57bb7c69d4-76xm4\" (UID: \"70c09a40-83e9-4f29-8718-26e434fd2935\") " pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.882171 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-84c9f574d7-j7rfz"]
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.883605 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70c09a40-83e9-4f29-8718-26e434fd2935-combined-ca-bundle\") pod \"heat-engine-57bb7c69d4-76xm4\" (UID: \"70c09a40-83e9-4f29-8718-26e434fd2935\") " pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.883907 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.886154 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70c09a40-83e9-4f29-8718-26e434fd2935-config-data-custom\") pod \"heat-engine-57bb7c69d4-76xm4\" (UID: \"70c09a40-83e9-4f29-8718-26e434fd2935\") " pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.890228 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.896328 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70c09a40-83e9-4f29-8718-26e434fd2935-config-data\") pod \"heat-engine-57bb7c69d4-76xm4\" (UID: \"70c09a40-83e9-4f29-8718-26e434fd2935\") " pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.902074 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlphl\" (UniqueName: \"kubernetes.io/projected/70c09a40-83e9-4f29-8718-26e434fd2935-kube-api-access-jlphl\") pod \"heat-engine-57bb7c69d4-76xm4\" (UID: \"70c09a40-83e9-4f29-8718-26e434fd2935\") " pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.914833 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-56ffd66999-8rtfw"]
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.916272 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.917736 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.923976 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-84c9f574d7-j7rfz"]
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.943430 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-56ffd66999-8rtfw"]
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.978783 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4af61181-253f-4ea6-b73c-d0853de6552b-config-data-custom\") pod \"heat-cfnapi-84c9f574d7-j7rfz\" (UID: \"4af61181-253f-4ea6-b73c-d0853de6552b\") " pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.979114 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlkzw\" (UniqueName: \"kubernetes.io/projected/6b6cce0e-2a92-49dd-8c47-0e453905b9ea-kube-api-access-hlkzw\") pod \"heat-api-56ffd66999-8rtfw\" (UID: \"6b6cce0e-2a92-49dd-8c47-0e453905b9ea\") " pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.979281 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4af61181-253f-4ea6-b73c-d0853de6552b-combined-ca-bundle\") pod \"heat-cfnapi-84c9f574d7-j7rfz\" (UID: \"4af61181-253f-4ea6-b73c-d0853de6552b\") " pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.979372 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lv29\" (UniqueName: \"kubernetes.io/projected/4af61181-253f-4ea6-b73c-d0853de6552b-kube-api-access-6lv29\") pod \"heat-cfnapi-84c9f574d7-j7rfz\" (UID: \"4af61181-253f-4ea6-b73c-d0853de6552b\") " pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.979468 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6b6cce0e-2a92-49dd-8c47-0e453905b9ea-config-data-custom\") pod \"heat-api-56ffd66999-8rtfw\" (UID: \"6b6cce0e-2a92-49dd-8c47-0e453905b9ea\") " pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.979548 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b6cce0e-2a92-49dd-8c47-0e453905b9ea-combined-ca-bundle\") pod \"heat-api-56ffd66999-8rtfw\" (UID: \"6b6cce0e-2a92-49dd-8c47-0e453905b9ea\") " pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.979672 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4af61181-253f-4ea6-b73c-d0853de6552b-config-data\") pod \"heat-cfnapi-84c9f574d7-j7rfz\" (UID: \"4af61181-253f-4ea6-b73c-d0853de6552b\") " pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:00 crc kubenswrapper[4910]: I0105 23:33:00.979768 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b6cce0e-2a92-49dd-8c47-0e453905b9ea-config-data\") pod \"heat-api-56ffd66999-8rtfw\" (UID: \"6b6cce0e-2a92-49dd-8c47-0e453905b9ea\") " pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.018592 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.082309 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lv29\" (UniqueName: \"kubernetes.io/projected/4af61181-253f-4ea6-b73c-d0853de6552b-kube-api-access-6lv29\") pod \"heat-cfnapi-84c9f574d7-j7rfz\" (UID: \"4af61181-253f-4ea6-b73c-d0853de6552b\") " pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.082358 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6b6cce0e-2a92-49dd-8c47-0e453905b9ea-config-data-custom\") pod \"heat-api-56ffd66999-8rtfw\" (UID: \"6b6cce0e-2a92-49dd-8c47-0e453905b9ea\") " pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.082388 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b6cce0e-2a92-49dd-8c47-0e453905b9ea-combined-ca-bundle\") pod \"heat-api-56ffd66999-8rtfw\" (UID: \"6b6cce0e-2a92-49dd-8c47-0e453905b9ea\") " pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.082440 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4af61181-253f-4ea6-b73c-d0853de6552b-config-data\") pod \"heat-cfnapi-84c9f574d7-j7rfz\" (UID: \"4af61181-253f-4ea6-b73c-d0853de6552b\") " pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.082462 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b6cce0e-2a92-49dd-8c47-0e453905b9ea-config-data\") pod \"heat-api-56ffd66999-8rtfw\" (UID: \"6b6cce0e-2a92-49dd-8c47-0e453905b9ea\") " pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.082514 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4af61181-253f-4ea6-b73c-d0853de6552b-config-data-custom\") pod \"heat-cfnapi-84c9f574d7-j7rfz\" (UID: \"4af61181-253f-4ea6-b73c-d0853de6552b\") " pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.082542 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlkzw\" (UniqueName: \"kubernetes.io/projected/6b6cce0e-2a92-49dd-8c47-0e453905b9ea-kube-api-access-hlkzw\") pod \"heat-api-56ffd66999-8rtfw\" (UID: \"6b6cce0e-2a92-49dd-8c47-0e453905b9ea\") " pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.082595 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4af61181-253f-4ea6-b73c-d0853de6552b-combined-ca-bundle\") pod \"heat-cfnapi-84c9f574d7-j7rfz\" (UID: \"4af61181-253f-4ea6-b73c-d0853de6552b\") " pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.088495 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4af61181-253f-4ea6-b73c-d0853de6552b-config-data\") pod \"heat-cfnapi-84c9f574d7-j7rfz\" (UID: \"4af61181-253f-4ea6-b73c-d0853de6552b\") " pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.094905 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6b6cce0e-2a92-49dd-8c47-0e453905b9ea-config-data-custom\") pod \"heat-api-56ffd66999-8rtfw\" (UID: \"6b6cce0e-2a92-49dd-8c47-0e453905b9ea\") " pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.099106 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4af61181-253f-4ea6-b73c-d0853de6552b-config-data-custom\") pod \"heat-cfnapi-84c9f574d7-j7rfz\" (UID: \"4af61181-253f-4ea6-b73c-d0853de6552b\") " pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.099728 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4af61181-253f-4ea6-b73c-d0853de6552b-combined-ca-bundle\") pod \"heat-cfnapi-84c9f574d7-j7rfz\" (UID: \"4af61181-253f-4ea6-b73c-d0853de6552b\") " pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.100059 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b6cce0e-2a92-49dd-8c47-0e453905b9ea-combined-ca-bundle\") pod \"heat-api-56ffd66999-8rtfw\" (UID: \"6b6cce0e-2a92-49dd-8c47-0e453905b9ea\") " pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.109371 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlkzw\" (UniqueName: \"kubernetes.io/projected/6b6cce0e-2a92-49dd-8c47-0e453905b9ea-kube-api-access-hlkzw\") pod \"heat-api-56ffd66999-8rtfw\" (UID: \"6b6cce0e-2a92-49dd-8c47-0e453905b9ea\") " pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.112835 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lv29\" (UniqueName: \"kubernetes.io/projected/4af61181-253f-4ea6-b73c-d0853de6552b-kube-api-access-6lv29\") pod \"heat-cfnapi-84c9f574d7-j7rfz\" (UID: \"4af61181-253f-4ea6-b73c-d0853de6552b\") " pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.128040 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b6cce0e-2a92-49dd-8c47-0e453905b9ea-config-data\") pod \"heat-api-56ffd66999-8rtfw\" (UID: \"6b6cce0e-2a92-49dd-8c47-0e453905b9ea\") " pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.321502 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.332935 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.585750 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-57bb7c69d4-76xm4"]
Jan 05 23:33:01 crc kubenswrapper[4910]: W0105 23:33:01.587469 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod70c09a40_83e9_4f29_8718_26e434fd2935.slice/crio-ce93d784416bab24731a47597bf5d96ab23c35ba7739bcbdab3a926e015a2b59 WatchSource:0}: Error finding container ce93d784416bab24731a47597bf5d96ab23c35ba7739bcbdab3a926e015a2b59: Status 404 returned error can't find the container with id ce93d784416bab24731a47597bf5d96ab23c35ba7739bcbdab3a926e015a2b59
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.722330 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514"
Jan 05 23:33:01 crc kubenswrapper[4910]: E0105 23:33:01.722596 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.836402 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-84c9f574d7-j7rfz"]
Jan 05 23:33:01 crc kubenswrapper[4910]: W0105 23:33:01.843476 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6b6cce0e_2a92_49dd_8c47_0e453905b9ea.slice/crio-2e912cba6b2ae00a3ae069eb5de23da3a84e061f22cf8fe38397125c72ecca04 WatchSource:0}: Error finding container 2e912cba6b2ae00a3ae069eb5de23da3a84e061f22cf8fe38397125c72ecca04: Status 404 returned error can't find the container with id 2e912cba6b2ae00a3ae069eb5de23da3a84e061f22cf8fe38397125c72ecca04
Jan 05 23:33:01 crc kubenswrapper[4910]: I0105 23:33:01.848048 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-56ffd66999-8rtfw"]
Jan 05 23:33:01 crc kubenswrapper[4910]: W0105 23:33:01.849624 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4af61181_253f_4ea6_b73c_d0853de6552b.slice/crio-f8d48158a65dd38d9a774aa510c774a2ea6ca261f42ceb42f7918245ef424a03 WatchSource:0}: Error finding container f8d48158a65dd38d9a774aa510c774a2ea6ca261f42ceb42f7918245ef424a03: Status 404 returned error can't find the container with id f8d48158a65dd38d9a774aa510c774a2ea6ca261f42ceb42f7918245ef424a03
Jan 05 23:33:02 crc kubenswrapper[4910]: I0105 23:33:02.600980 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-84c9f574d7-j7rfz" event={"ID":"4af61181-253f-4ea6-b73c-d0853de6552b","Type":"ContainerStarted","Data":"f8d48158a65dd38d9a774aa510c774a2ea6ca261f42ceb42f7918245ef424a03"}
Jan 05 23:33:02 crc kubenswrapper[4910]: I0105 23:33:02.605307 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-57bb7c69d4-76xm4" event={"ID":"70c09a40-83e9-4f29-8718-26e434fd2935","Type":"ContainerStarted","Data":"c2f306e03595920a36cca70b11617d09eda6a8fd280a1bb7702bee0497dd8daf"}
Jan 05 23:33:02 crc kubenswrapper[4910]: I0105 23:33:02.605360 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-57bb7c69d4-76xm4" event={"ID":"70c09a40-83e9-4f29-8718-26e434fd2935","Type":"ContainerStarted","Data":"ce93d784416bab24731a47597bf5d96ab23c35ba7739bcbdab3a926e015a2b59"}
Jan 05 23:33:02 crc kubenswrapper[4910]: I0105 23:33:02.605558 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:02 crc kubenswrapper[4910]: I0105 23:33:02.606589 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-56ffd66999-8rtfw" event={"ID":"6b6cce0e-2a92-49dd-8c47-0e453905b9ea","Type":"ContainerStarted","Data":"2e912cba6b2ae00a3ae069eb5de23da3a84e061f22cf8fe38397125c72ecca04"}
Jan 05 23:33:02 crc kubenswrapper[4910]: I0105 23:33:02.630445 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-57bb7c69d4-76xm4" podStartSLOduration=2.630425271 podStartE2EDuration="2.630425271s" podCreationTimestamp="2026-01-05 23:33:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:33:02.622526433 +0000 UTC m=+6114.200024113" watchObservedRunningTime="2026-01-05 23:33:02.630425271 +0000 UTC m=+6114.207922941"
Jan 05 23:33:03 crc kubenswrapper[4910]: I0105 23:33:03.255081 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-68557c6fd9-rbl2l"
Jan 05 23:33:04 crc kubenswrapper[4910]: I0105 23:33:04.654539 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-84c9f574d7-j7rfz" event={"ID":"4af61181-253f-4ea6-b73c-d0853de6552b","Type":"ContainerStarted","Data":"9078ff7ce5dd34b3fe92324c09e129056a0d4ca7ce1c161460c3f404ee4ffb90"}
Jan 05 23:33:04 crc kubenswrapper[4910]: I0105 23:33:04.655111 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:04 crc kubenswrapper[4910]: I0105 23:33:04.656179 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-56ffd66999-8rtfw" event={"ID":"6b6cce0e-2a92-49dd-8c47-0e453905b9ea","Type":"ContainerStarted","Data":"ba2d8b23b8cd3eb0dadfd5ed781ab38dbd43812648aecaa0673ca7e4219e9b68"}
Jan 05 23:33:04 crc kubenswrapper[4910]: I0105 23:33:04.657317 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:04 crc kubenswrapper[4910]: I0105 23:33:04.699065 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-84c9f574d7-j7rfz" podStartSLOduration=2.610631199 podStartE2EDuration="4.699034642s" podCreationTimestamp="2026-01-05 23:33:00 +0000 UTC" firstStartedPulling="2026-01-05 23:33:01.855114775 +0000 UTC m=+6113.432612445" lastFinishedPulling="2026-01-05 23:33:03.943518218 +0000 UTC m=+6115.521015888" observedRunningTime="2026-01-05 23:33:04.690101129 +0000 UTC m=+6116.267598799" watchObservedRunningTime="2026-01-05 23:33:04.699034642 +0000 UTC m=+6116.276532322"
Jan 05 23:33:04 crc kubenswrapper[4910]: I0105 23:33:04.710750 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-56ffd66999-8rtfw" podStartSLOduration=2.610201429 podStartE2EDuration="4.710729871s" podCreationTimestamp="2026-01-05 23:33:00 +0000 UTC" firstStartedPulling="2026-01-05 23:33:01.846478319 +0000 UTC m=+6113.423975989" lastFinishedPulling="2026-01-05 23:33:03.947006761 +0000 UTC m=+6115.524504431" observedRunningTime="2026-01-05 23:33:04.705925847 +0000 UTC m=+6116.283423537" watchObservedRunningTime="2026-01-05 23:33:04.710729871 +0000 UTC m=+6116.288227541"
Jan 05 23:33:05 crc kubenswrapper[4910]: I0105 23:33:05.132589 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-68557c6fd9-rbl2l"
Jan 05 23:33:05 crc kubenswrapper[4910]: I0105 23:33:05.190658 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8585df8647-wlz5f"]
Jan 05 23:33:05 crc kubenswrapper[4910]: I0105 23:33:05.190920 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-8585df8647-wlz5f" podUID="226256c6-0132-469c-af61-bf062ea41762" containerName="horizon-log" containerID="cri-o://d2d59482631a40a4a2ad4071dab47b5c28192b80780bc401ff67e1f7af399f06" gracePeriod=30
Jan 05 23:33:05 crc kubenswrapper[4910]: I0105 23:33:05.191008 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-8585df8647-wlz5f" podUID="226256c6-0132-469c-af61-bf062ea41762" containerName="horizon" containerID="cri-o://e2e22b952c75535273d518b9ff0c101e27b86ecf318387e70466ab879aa704b5" gracePeriod=30
Jan 05 23:33:08 crc kubenswrapper[4910]: I0105 23:33:08.699228 4910 generic.go:334] "Generic (PLEG): container finished" podID="226256c6-0132-469c-af61-bf062ea41762" containerID="e2e22b952c75535273d518b9ff0c101e27b86ecf318387e70466ab879aa704b5" exitCode=0
Jan 05 23:33:08 crc kubenswrapper[4910]: I0105 23:33:08.699305 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8585df8647-wlz5f" event={"ID":"226256c6-0132-469c-af61-bf062ea41762","Type":"ContainerDied","Data":"e2e22b952c75535273d518b9ff0c101e27b86ecf318387e70466ab879aa704b5"}
Jan 05 23:33:11 crc kubenswrapper[4910]: I0105 23:33:11.045283 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-af75-account-create-update-w7xpn"]
Jan 05 23:33:11 crc kubenswrapper[4910]: I0105 23:33:11.054885 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-5z8ph"]
Jan 05 23:33:11 crc kubenswrapper[4910]: I0105 23:33:11.056594 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-57bb7c69d4-76xm4"
Jan 05 23:33:11 crc kubenswrapper[4910]: I0105 23:33:11.064180 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-af75-account-create-update-w7xpn"]
Jan 05 23:33:11 crc kubenswrapper[4910]: I0105 23:33:11.077896 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-5z8ph"]
Jan 05 23:33:12 crc kubenswrapper[4910]: I0105 23:33:12.662992 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-84c9f574d7-j7rfz"
Jan 05 23:33:12 crc kubenswrapper[4910]: I0105 23:33:12.733994 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6679f87f-3fe1-413f-bf56-c5a2c56de981" path="/var/lib/kubelet/pods/6679f87f-3fe1-413f-bf56-c5a2c56de981/volumes"
Jan 05 23:33:12 crc kubenswrapper[4910]: I0105 23:33:12.735433 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e381097e-e5fe-4f7c-b5da-afe0bab26a73" path="/var/lib/kubelet/pods/e381097e-e5fe-4f7c-b5da-afe0bab26a73/volumes"
Jan 05 23:33:12 crc kubenswrapper[4910]: I0105 23:33:12.736416 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-56ffd66999-8rtfw"
Jan 05 23:33:14 crc kubenswrapper[4910]: I0105 23:33:14.321193 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-8585df8647-wlz5f" podUID="226256c6-0132-469c-af61-bf062ea41762" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.117:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.117:8080: connect: connection refused"
Jan 05 23:33:14 crc kubenswrapper[4910]: I0105 23:33:14.721694 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514"
Jan 05 23:33:14 crc kubenswrapper[4910]: E0105 23:33:14.722043 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 23:33:18 crc kubenswrapper[4910]: I0105 23:33:18.062789 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-dg9ps"]
Jan 05 23:33:18 crc kubenswrapper[4910]: I0105 23:33:18.077004 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-dg9ps"]
Jan 05 23:33:18 crc kubenswrapper[4910]: I0105 23:33:18.757976 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7da2c266-02d4-4e31-b884-1d4b03678794" path="/var/lib/kubelet/pods/7da2c266-02d4-4e31-b884-1d4b03678794/volumes"
Jan 05 23:33:24 crc kubenswrapper[4910]: I0105 23:33:24.320884 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-8585df8647-wlz5f" podUID="226256c6-0132-469c-af61-bf062ea41762" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.117:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.117:8080: connect: connection refused"
Jan 05 23:33:25 crc kubenswrapper[4910]: I0105 23:33:25.721834 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514"
Jan 05 23:33:25 crc kubenswrapper[4910]: E0105 23:33:25.722136 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.033044 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx"]
Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.037108 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx"
Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.045040 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.083325 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx"]
Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.148923 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ebca1bd5-9586-4341-9f53-ad40bf1827f0-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx\" (UID: \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx"
Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.149034 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m8h9\" (UniqueName: \"kubernetes.io/projected/ebca1bd5-9586-4341-9f53-ad40bf1827f0-kube-api-access-6m8h9\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx\" (UID: \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx"
Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.149332 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ebca1bd5-9586-4341-9f53-ad40bf1827f0-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx\" (UID: \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx"
Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.251027 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6m8h9\" (UniqueName: \"kubernetes.io/projected/ebca1bd5-9586-4341-9f53-ad40bf1827f0-kube-api-access-6m8h9\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx\" (UID: \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx"
Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.251399 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ebca1bd5-9586-4341-9f53-ad40bf1827f0-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx\" (UID: \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx"
Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.251582 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ebca1bd5-9586-4341-9f53-ad40bf1827f0-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx\" (UID: \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx"
Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.252009 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName:
\"kubernetes.io/empty-dir/ebca1bd5-9586-4341-9f53-ad40bf1827f0-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx\" (UID: \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx" Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.252553 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ebca1bd5-9586-4341-9f53-ad40bf1827f0-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx\" (UID: \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx" Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.291275 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6m8h9\" (UniqueName: \"kubernetes.io/projected/ebca1bd5-9586-4341-9f53-ad40bf1827f0-kube-api-access-6m8h9\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx\" (UID: \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx" Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.373500 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx" Jan 05 23:33:33 crc kubenswrapper[4910]: I0105 23:33:33.957765 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx"] Jan 05 23:33:34 crc kubenswrapper[4910]: I0105 23:33:34.032888 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx" event={"ID":"ebca1bd5-9586-4341-9f53-ad40bf1827f0","Type":"ContainerStarted","Data":"f528fece3c06b17ce01cf636fa6b1383bbd3e030b25cb4e6e7bfe8bfb34e3fa3"} Jan 05 23:33:34 crc kubenswrapper[4910]: I0105 23:33:34.321132 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-8585df8647-wlz5f" podUID="226256c6-0132-469c-af61-bf062ea41762" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.117:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.117:8080: connect: connection refused" Jan 05 23:33:34 crc kubenswrapper[4910]: I0105 23:33:34.321580 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.048896 4910 generic.go:334] "Generic (PLEG): container finished" podID="ebca1bd5-9586-4341-9f53-ad40bf1827f0" containerID="3c17f7bc1111a88cbb9e2e2d70b55aec024058a45824598d144d8f9385c03b1e" exitCode=0 Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.048968 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx" event={"ID":"ebca1bd5-9586-4341-9f53-ad40bf1827f0","Type":"ContainerDied","Data":"3c17f7bc1111a88cbb9e2e2d70b55aec024058a45824598d144d8f9385c03b1e"} Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.660725 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.704740 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/226256c6-0132-469c-af61-bf062ea41762-horizon-secret-key\") pod \"226256c6-0132-469c-af61-bf062ea41762\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.705091 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/226256c6-0132-469c-af61-bf062ea41762-config-data\") pod \"226256c6-0132-469c-af61-bf062ea41762\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.705212 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nw8t5\" (UniqueName: \"kubernetes.io/projected/226256c6-0132-469c-af61-bf062ea41762-kube-api-access-nw8t5\") pod \"226256c6-0132-469c-af61-bf062ea41762\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.705292 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/226256c6-0132-469c-af61-bf062ea41762-logs\") pod \"226256c6-0132-469c-af61-bf062ea41762\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.705367 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/226256c6-0132-469c-af61-bf062ea41762-scripts\") pod \"226256c6-0132-469c-af61-bf062ea41762\" (UID: \"226256c6-0132-469c-af61-bf062ea41762\") " Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.705947 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/226256c6-0132-469c-af61-bf062ea41762-logs" (OuterVolumeSpecName: "logs") pod "226256c6-0132-469c-af61-bf062ea41762" (UID: "226256c6-0132-469c-af61-bf062ea41762"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.706366 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/226256c6-0132-469c-af61-bf062ea41762-logs\") on node \"crc\" DevicePath \"\"" Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.711242 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/226256c6-0132-469c-af61-bf062ea41762-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "226256c6-0132-469c-af61-bf062ea41762" (UID: "226256c6-0132-469c-af61-bf062ea41762"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.717353 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/226256c6-0132-469c-af61-bf062ea41762-kube-api-access-nw8t5" (OuterVolumeSpecName: "kube-api-access-nw8t5") pod "226256c6-0132-469c-af61-bf062ea41762" (UID: "226256c6-0132-469c-af61-bf062ea41762"). InnerVolumeSpecName "kube-api-access-nw8t5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.742046 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/226256c6-0132-469c-af61-bf062ea41762-config-data" (OuterVolumeSpecName: "config-data") pod "226256c6-0132-469c-af61-bf062ea41762" (UID: "226256c6-0132-469c-af61-bf062ea41762"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.747395 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/226256c6-0132-469c-af61-bf062ea41762-scripts" (OuterVolumeSpecName: "scripts") pod "226256c6-0132-469c-af61-bf062ea41762" (UID: "226256c6-0132-469c-af61-bf062ea41762"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.808168 4910 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/226256c6-0132-469c-af61-bf062ea41762-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.808211 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/226256c6-0132-469c-af61-bf062ea41762-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.808223 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nw8t5\" (UniqueName: \"kubernetes.io/projected/226256c6-0132-469c-af61-bf062ea41762-kube-api-access-nw8t5\") on node \"crc\" DevicePath \"\"" Jan 05 23:33:35 crc kubenswrapper[4910]: I0105 23:33:35.808233 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/226256c6-0132-469c-af61-bf062ea41762-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:33:36 crc kubenswrapper[4910]: I0105 23:33:36.061671 4910 generic.go:334] "Generic (PLEG): container finished" podID="226256c6-0132-469c-af61-bf062ea41762" containerID="d2d59482631a40a4a2ad4071dab47b5c28192b80780bc401ff67e1f7af399f06" exitCode=137 Jan 05 23:33:36 crc kubenswrapper[4910]: I0105 23:33:36.061726 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8585df8647-wlz5f" event={"ID":"226256c6-0132-469c-af61-bf062ea41762","Type":"ContainerDied","Data":"d2d59482631a40a4a2ad4071dab47b5c28192b80780bc401ff67e1f7af399f06"} Jan 05 23:33:36 crc kubenswrapper[4910]: I0105 23:33:36.061761 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8585df8647-wlz5f" event={"ID":"226256c6-0132-469c-af61-bf062ea41762","Type":"ContainerDied","Data":"f42e1d4ac6f2be39117cc2e8b80996b7ff392a4eebbaec643ab81c77265f41c9"} Jan 05 23:33:36 crc kubenswrapper[4910]: I0105 23:33:36.061783 4910 scope.go:117] "RemoveContainer" containerID="e2e22b952c75535273d518b9ff0c101e27b86ecf318387e70466ab879aa704b5" Jan 05 23:33:36 crc kubenswrapper[4910]: I0105 23:33:36.061932 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-8585df8647-wlz5f" Jan 05 23:33:36 crc kubenswrapper[4910]: I0105 23:33:36.133403 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8585df8647-wlz5f"] Jan 05 23:33:36 crc kubenswrapper[4910]: I0105 23:33:36.142922 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-8585df8647-wlz5f"] Jan 05 23:33:36 crc kubenswrapper[4910]: I0105 23:33:36.276894 4910 scope.go:117] "RemoveContainer" containerID="d2d59482631a40a4a2ad4071dab47b5c28192b80780bc401ff67e1f7af399f06" Jan 05 23:33:36 crc kubenswrapper[4910]: I0105 23:33:36.358526 4910 scope.go:117] "RemoveContainer" containerID="e2e22b952c75535273d518b9ff0c101e27b86ecf318387e70466ab879aa704b5" Jan 05 23:33:36 crc kubenswrapper[4910]: E0105 23:33:36.360253 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2e22b952c75535273d518b9ff0c101e27b86ecf318387e70466ab879aa704b5\": container with ID starting with e2e22b952c75535273d518b9ff0c101e27b86ecf318387e70466ab879aa704b5 not found: ID does not exist" containerID="e2e22b952c75535273d518b9ff0c101e27b86ecf318387e70466ab879aa704b5" Jan 05 23:33:36 crc kubenswrapper[4910]: I0105 23:33:36.360310 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2e22b952c75535273d518b9ff0c101e27b86ecf318387e70466ab879aa704b5"} err="failed to get container status \"e2e22b952c75535273d518b9ff0c101e27b86ecf318387e70466ab879aa704b5\": rpc error: code = NotFound desc = could not find container \"e2e22b952c75535273d518b9ff0c101e27b86ecf318387e70466ab879aa704b5\": container with ID starting with e2e22b952c75535273d518b9ff0c101e27b86ecf318387e70466ab879aa704b5 not found: ID does not exist" Jan 05 23:33:36 crc kubenswrapper[4910]: I0105 23:33:36.360355 4910 scope.go:117] "RemoveContainer" containerID="d2d59482631a40a4a2ad4071dab47b5c28192b80780bc401ff67e1f7af399f06" Jan 05 23:33:36 crc kubenswrapper[4910]: E0105 23:33:36.361348 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2d59482631a40a4a2ad4071dab47b5c28192b80780bc401ff67e1f7af399f06\": container with ID starting with d2d59482631a40a4a2ad4071dab47b5c28192b80780bc401ff67e1f7af399f06 not found: ID does not exist" containerID="d2d59482631a40a4a2ad4071dab47b5c28192b80780bc401ff67e1f7af399f06" Jan 05 23:33:36 crc kubenswrapper[4910]: I0105 23:33:36.361388 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2d59482631a40a4a2ad4071dab47b5c28192b80780bc401ff67e1f7af399f06"} err="failed to get container status \"d2d59482631a40a4a2ad4071dab47b5c28192b80780bc401ff67e1f7af399f06\": rpc error: code = NotFound desc = could not find container \"d2d59482631a40a4a2ad4071dab47b5c28192b80780bc401ff67e1f7af399f06\": container with ID starting with d2d59482631a40a4a2ad4071dab47b5c28192b80780bc401ff67e1f7af399f06 not found: ID does not exist" Jan 05 23:33:36 crc kubenswrapper[4910]: I0105 23:33:36.735047 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="226256c6-0132-469c-af61-bf062ea41762" path="/var/lib/kubelet/pods/226256c6-0132-469c-af61-bf062ea41762/volumes" Jan 05 23:33:37 crc kubenswrapper[4910]: I0105 23:33:37.079411 4910 generic.go:334] "Generic (PLEG): container finished" podID="ebca1bd5-9586-4341-9f53-ad40bf1827f0" containerID="15e34738e4048e6206a4988e1e0cfad4305fa6d804418169f7f4dbacab1b4551" exitCode=0 Jan 05 
23:33:37 crc kubenswrapper[4910]: I0105 23:33:37.079532 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx" event={"ID":"ebca1bd5-9586-4341-9f53-ad40bf1827f0","Type":"ContainerDied","Data":"15e34738e4048e6206a4988e1e0cfad4305fa6d804418169f7f4dbacab1b4551"} Jan 05 23:33:37 crc kubenswrapper[4910]: I0105 23:33:37.722429 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:33:37 crc kubenswrapper[4910]: E0105 23:33:37.723157 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:33:38 crc kubenswrapper[4910]: I0105 23:33:38.096540 4910 generic.go:334] "Generic (PLEG): container finished" podID="ebca1bd5-9586-4341-9f53-ad40bf1827f0" containerID="0ee9e1c8193a479aa4041f9520074414f28d973456953985e80489018f38b771" exitCode=0 Jan 05 23:33:38 crc kubenswrapper[4910]: I0105 23:33:38.096619 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx" event={"ID":"ebca1bd5-9586-4341-9f53-ad40bf1827f0","Type":"ContainerDied","Data":"0ee9e1c8193a479aa4041f9520074414f28d973456953985e80489018f38b771"} Jan 05 23:33:39 crc kubenswrapper[4910]: I0105 23:33:39.543505 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx" Jan 05 23:33:39 crc kubenswrapper[4910]: I0105 23:33:39.601002 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6m8h9\" (UniqueName: \"kubernetes.io/projected/ebca1bd5-9586-4341-9f53-ad40bf1827f0-kube-api-access-6m8h9\") pod \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\" (UID: \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\") " Jan 05 23:33:39 crc kubenswrapper[4910]: I0105 23:33:39.601203 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ebca1bd5-9586-4341-9f53-ad40bf1827f0-util\") pod \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\" (UID: \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\") " Jan 05 23:33:39 crc kubenswrapper[4910]: I0105 23:33:39.601223 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ebca1bd5-9586-4341-9f53-ad40bf1827f0-bundle\") pod \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\" (UID: \"ebca1bd5-9586-4341-9f53-ad40bf1827f0\") " Jan 05 23:33:39 crc kubenswrapper[4910]: I0105 23:33:39.603912 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ebca1bd5-9586-4341-9f53-ad40bf1827f0-bundle" (OuterVolumeSpecName: "bundle") pod "ebca1bd5-9586-4341-9f53-ad40bf1827f0" (UID: "ebca1bd5-9586-4341-9f53-ad40bf1827f0"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:33:39 crc kubenswrapper[4910]: I0105 23:33:39.611394 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebca1bd5-9586-4341-9f53-ad40bf1827f0-kube-api-access-6m8h9" (OuterVolumeSpecName: "kube-api-access-6m8h9") pod "ebca1bd5-9586-4341-9f53-ad40bf1827f0" (UID: "ebca1bd5-9586-4341-9f53-ad40bf1827f0"). InnerVolumeSpecName "kube-api-access-6m8h9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:33:39 crc kubenswrapper[4910]: I0105 23:33:39.703846 4910 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ebca1bd5-9586-4341-9f53-ad40bf1827f0-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:33:39 crc kubenswrapper[4910]: I0105 23:33:39.703879 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6m8h9\" (UniqueName: \"kubernetes.io/projected/ebca1bd5-9586-4341-9f53-ad40bf1827f0-kube-api-access-6m8h9\") on node \"crc\" DevicePath \"\"" Jan 05 23:33:39 crc kubenswrapper[4910]: I0105 23:33:39.846106 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ebca1bd5-9586-4341-9f53-ad40bf1827f0-util" (OuterVolumeSpecName: "util") pod "ebca1bd5-9586-4341-9f53-ad40bf1827f0" (UID: "ebca1bd5-9586-4341-9f53-ad40bf1827f0"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:33:39 crc kubenswrapper[4910]: I0105 23:33:39.908901 4910 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ebca1bd5-9586-4341-9f53-ad40bf1827f0-util\") on node \"crc\" DevicePath \"\"" Jan 05 23:33:40 crc kubenswrapper[4910]: I0105 23:33:40.128161 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx" event={"ID":"ebca1bd5-9586-4341-9f53-ad40bf1827f0","Type":"ContainerDied","Data":"f528fece3c06b17ce01cf636fa6b1383bbd3e030b25cb4e6e7bfe8bfb34e3fa3"} Jan 05 23:33:40 crc kubenswrapper[4910]: I0105 23:33:40.128223 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f528fece3c06b17ce01cf636fa6b1383bbd3e030b25cb4e6e7bfe8bfb34e3fa3" Jan 05 23:33:40 crc kubenswrapper[4910]: I0105 23:33:40.128277 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx" Jan 05 23:33:47 crc kubenswrapper[4910]: I0105 23:33:47.449151 4910 scope.go:117] "RemoveContainer" containerID="96ecb3fd5b09d81e4a23dd75ad64c9af703b5ea4de235b53ba5c17e3d2d9d334" Jan 05 23:33:47 crc kubenswrapper[4910]: I0105 23:33:47.507771 4910 scope.go:117] "RemoveContainer" containerID="324dff769dd788f14462bf2394c802588cbe7f03e37ecca2dfd20adbd8cad442" Jan 05 23:33:47 crc kubenswrapper[4910]: I0105 23:33:47.533595 4910 scope.go:117] "RemoveContainer" containerID="cdcd0efd55cb0eec9e739b179e9ad4dd3b4a5a19b1286d663c3a35f702514c63" Jan 05 23:33:50 crc kubenswrapper[4910]: I0105 23:33:50.721994 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:33:50 crc kubenswrapper[4910]: E0105 23:33:50.722884 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.672913 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-hb5xs"] Jan 05 23:33:51 crc kubenswrapper[4910]: E0105 23:33:51.673398 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebca1bd5-9586-4341-9f53-ad40bf1827f0" containerName="util" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.673419 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebca1bd5-9586-4341-9f53-ad40bf1827f0" containerName="util" Jan 05 23:33:51 crc kubenswrapper[4910]: E0105 23:33:51.673430 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebca1bd5-9586-4341-9f53-ad40bf1827f0" containerName="extract" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.673437 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebca1bd5-9586-4341-9f53-ad40bf1827f0" containerName="extract" Jan 05 23:33:51 crc kubenswrapper[4910]: E0105 23:33:51.673451 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="226256c6-0132-469c-af61-bf062ea41762" containerName="horizon-log" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.673457 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="226256c6-0132-469c-af61-bf062ea41762" containerName="horizon-log" Jan 05 23:33:51 crc kubenswrapper[4910]: E0105 23:33:51.673465 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="226256c6-0132-469c-af61-bf062ea41762" containerName="horizon" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.673471 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="226256c6-0132-469c-af61-bf062ea41762" containerName="horizon" Jan 05 23:33:51 crc kubenswrapper[4910]: E0105 23:33:51.673488 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebca1bd5-9586-4341-9f53-ad40bf1827f0" containerName="pull" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.673494 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebca1bd5-9586-4341-9f53-ad40bf1827f0" containerName="pull" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.673671 4910 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="226256c6-0132-469c-af61-bf062ea41762" containerName="horizon-log" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.673686 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="226256c6-0132-469c-af61-bf062ea41762" containerName="horizon" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.673692 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebca1bd5-9586-4341-9f53-ad40bf1827f0" containerName="extract" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.674420 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-hb5xs" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.677079 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-zrzjj" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.677318 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.677458 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.692150 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-hb5xs"] Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.756326 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqfjz\" (UniqueName: \"kubernetes.io/projected/9a834e51-2fcd-4e02-ab87-560d50993337-kube-api-access-fqfjz\") pod \"obo-prometheus-operator-68bc856cb9-hb5xs\" (UID: \"9a834e51-2fcd-4e02-ab87-560d50993337\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-hb5xs" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.778416 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw"] Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.779753 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.781724 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.782161 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-n9hl6" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.807215 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp"] Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.808577 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.819088 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw"] Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.840936 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp"] Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.859152 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d4f09a8f-4fb6-4720-8fb9-fb2f480e9384-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp\" (UID: \"d4f09a8f-4fb6-4720-8fb9-fb2f480e9384\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.859242 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqfjz\" (UniqueName: \"kubernetes.io/projected/9a834e51-2fcd-4e02-ab87-560d50993337-kube-api-access-fqfjz\") pod \"obo-prometheus-operator-68bc856cb9-hb5xs\" (UID: \"9a834e51-2fcd-4e02-ab87-560d50993337\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-hb5xs" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.859265 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dd44f3c6-e484-4843-8ce1-3c771202ebf0-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw\" (UID: \"dd44f3c6-e484-4843-8ce1-3c771202ebf0\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.859303 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d4f09a8f-4fb6-4720-8fb9-fb2f480e9384-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp\" (UID: \"d4f09a8f-4fb6-4720-8fb9-fb2f480e9384\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.859334 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dd44f3c6-e484-4843-8ce1-3c771202ebf0-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw\" (UID: \"dd44f3c6-e484-4843-8ce1-3c771202ebf0\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.880718 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqfjz\" (UniqueName: \"kubernetes.io/projected/9a834e51-2fcd-4e02-ab87-560d50993337-kube-api-access-fqfjz\") pod \"obo-prometheus-operator-68bc856cb9-hb5xs\" (UID: \"9a834e51-2fcd-4e02-ab87-560d50993337\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-hb5xs" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.900479 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-7ndlc"] Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.901793 4910 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-7ndlc" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.907856 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.907900 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-vbmrc" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.915381 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-7ndlc"] Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.961263 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dd44f3c6-e484-4843-8ce1-3c771202ebf0-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw\" (UID: \"dd44f3c6-e484-4843-8ce1-3c771202ebf0\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.961340 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d4f09a8f-4fb6-4720-8fb9-fb2f480e9384-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp\" (UID: \"d4f09a8f-4fb6-4720-8fb9-fb2f480e9384\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.961390 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dd44f3c6-e484-4843-8ce1-3c771202ebf0-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw\" (UID: \"dd44f3c6-e484-4843-8ce1-3c771202ebf0\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.961447 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgbc8\" (UniqueName: \"kubernetes.io/projected/fd9aa314-35df-4c05-92fd-aa1127e8e80a-kube-api-access-mgbc8\") pod \"observability-operator-59bdc8b94-7ndlc\" (UID: \"fd9aa314-35df-4c05-92fd-aa1127e8e80a\") " pod="openshift-operators/observability-operator-59bdc8b94-7ndlc" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.961543 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/fd9aa314-35df-4c05-92fd-aa1127e8e80a-observability-operator-tls\") pod \"observability-operator-59bdc8b94-7ndlc\" (UID: \"fd9aa314-35df-4c05-92fd-aa1127e8e80a\") " pod="openshift-operators/observability-operator-59bdc8b94-7ndlc" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.961569 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d4f09a8f-4fb6-4720-8fb9-fb2f480e9384-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp\" (UID: \"d4f09a8f-4fb6-4720-8fb9-fb2f480e9384\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.965731 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/d4f09a8f-4fb6-4720-8fb9-fb2f480e9384-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp\" (UID: \"d4f09a8f-4fb6-4720-8fb9-fb2f480e9384\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.966552 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dd44f3c6-e484-4843-8ce1-3c771202ebf0-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw\" (UID: \"dd44f3c6-e484-4843-8ce1-3c771202ebf0\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.967730 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d4f09a8f-4fb6-4720-8fb9-fb2f480e9384-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp\" (UID: \"d4f09a8f-4fb6-4720-8fb9-fb2f480e9384\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.978831 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dd44f3c6-e484-4843-8ce1-3c771202ebf0-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw\" (UID: \"dd44f3c6-e484-4843-8ce1-3c771202ebf0\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw" Jan 05 23:33:51 crc kubenswrapper[4910]: I0105 23:33:51.994143 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-hb5xs" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.064109 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgbc8\" (UniqueName: \"kubernetes.io/projected/fd9aa314-35df-4c05-92fd-aa1127e8e80a-kube-api-access-mgbc8\") pod \"observability-operator-59bdc8b94-7ndlc\" (UID: \"fd9aa314-35df-4c05-92fd-aa1127e8e80a\") " pod="openshift-operators/observability-operator-59bdc8b94-7ndlc" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.064280 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/fd9aa314-35df-4c05-92fd-aa1127e8e80a-observability-operator-tls\") pod \"observability-operator-59bdc8b94-7ndlc\" (UID: \"fd9aa314-35df-4c05-92fd-aa1127e8e80a\") " pod="openshift-operators/observability-operator-59bdc8b94-7ndlc" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.071587 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/fd9aa314-35df-4c05-92fd-aa1127e8e80a-observability-operator-tls\") pod \"observability-operator-59bdc8b94-7ndlc\" (UID: \"fd9aa314-35df-4c05-92fd-aa1127e8e80a\") " pod="openshift-operators/observability-operator-59bdc8b94-7ndlc" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.090170 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgbc8\" (UniqueName: \"kubernetes.io/projected/fd9aa314-35df-4c05-92fd-aa1127e8e80a-kube-api-access-mgbc8\") pod \"observability-operator-59bdc8b94-7ndlc\" (UID: \"fd9aa314-35df-4c05-92fd-aa1127e8e80a\") " pod="openshift-operators/observability-operator-59bdc8b94-7ndlc" Jan 05 
23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.100312 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.104911 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-xf7mz"] Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.106325 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-xf7mz" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.109041 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-v5ddt" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.124856 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-xf7mz"] Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.129776 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.165699 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d6b4354f-e670-44e7-a48e-4252a4ac68a6-openshift-service-ca\") pod \"perses-operator-5bf474d74f-xf7mz\" (UID: \"d6b4354f-e670-44e7-a48e-4252a4ac68a6\") " pod="openshift-operators/perses-operator-5bf474d74f-xf7mz" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.165936 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vg7cg\" (UniqueName: \"kubernetes.io/projected/d6b4354f-e670-44e7-a48e-4252a4ac68a6-kube-api-access-vg7cg\") pod \"perses-operator-5bf474d74f-xf7mz\" (UID: \"d6b4354f-e670-44e7-a48e-4252a4ac68a6\") " pod="openshift-operators/perses-operator-5bf474d74f-xf7mz" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.251688 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-7ndlc" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.269419 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d6b4354f-e670-44e7-a48e-4252a4ac68a6-openshift-service-ca\") pod \"perses-operator-5bf474d74f-xf7mz\" (UID: \"d6b4354f-e670-44e7-a48e-4252a4ac68a6\") " pod="openshift-operators/perses-operator-5bf474d74f-xf7mz" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.269483 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vg7cg\" (UniqueName: \"kubernetes.io/projected/d6b4354f-e670-44e7-a48e-4252a4ac68a6-kube-api-access-vg7cg\") pod \"perses-operator-5bf474d74f-xf7mz\" (UID: \"d6b4354f-e670-44e7-a48e-4252a4ac68a6\") " pod="openshift-operators/perses-operator-5bf474d74f-xf7mz" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.270706 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d6b4354f-e670-44e7-a48e-4252a4ac68a6-openshift-service-ca\") pod \"perses-operator-5bf474d74f-xf7mz\" (UID: \"d6b4354f-e670-44e7-a48e-4252a4ac68a6\") " pod="openshift-operators/perses-operator-5bf474d74f-xf7mz" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.296765 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vg7cg\" (UniqueName: \"kubernetes.io/projected/d6b4354f-e670-44e7-a48e-4252a4ac68a6-kube-api-access-vg7cg\") pod \"perses-operator-5bf474d74f-xf7mz\" (UID: \"d6b4354f-e670-44e7-a48e-4252a4ac68a6\") " pod="openshift-operators/perses-operator-5bf474d74f-xf7mz" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.464981 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-xf7mz" Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.605629 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-hb5xs"] Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.779928 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw"] Jan 05 23:33:52 crc kubenswrapper[4910]: I0105 23:33:52.863736 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp"] Jan 05 23:33:52 crc kubenswrapper[4910]: W0105 23:33:52.901038 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4f09a8f_4fb6_4720_8fb9_fb2f480e9384.slice/crio-c12177657b58be33d55769310de6fab3b4792b229cf40c08c16f6400a4de5244 WatchSource:0}: Error finding container c12177657b58be33d55769310de6fab3b4792b229cf40c08c16f6400a4de5244: Status 404 returned error can't find the container with id c12177657b58be33d55769310de6fab3b4792b229cf40c08c16f6400a4de5244 Jan 05 23:33:53 crc kubenswrapper[4910]: I0105 23:33:53.031945 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-7ndlc"] Jan 05 23:33:53 crc kubenswrapper[4910]: W0105 23:33:53.039334 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd9aa314_35df_4c05_92fd_aa1127e8e80a.slice/crio-9af80956396b85a94f43f3a38370d41bf2286637d4177a05a1d736b8ac4ca483 WatchSource:0}: Error finding container 9af80956396b85a94f43f3a38370d41bf2286637d4177a05a1d736b8ac4ca483: Status 404 returned error can't find the container with id 9af80956396b85a94f43f3a38370d41bf2286637d4177a05a1d736b8ac4ca483 Jan 05 23:33:53 crc kubenswrapper[4910]: I0105 23:33:53.106103 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-xf7mz"] Jan 05 23:33:53 crc kubenswrapper[4910]: W0105 23:33:53.123221 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd6b4354f_e670_44e7_a48e_4252a4ac68a6.slice/crio-993fd0ef3974644ac909154323e0e907c9fd3b2a143bd551230e7e143617bd26 WatchSource:0}: Error finding container 993fd0ef3974644ac909154323e0e907c9fd3b2a143bd551230e7e143617bd26: Status 404 returned error can't find the container with id 993fd0ef3974644ac909154323e0e907c9fd3b2a143bd551230e7e143617bd26 Jan 05 23:33:53 crc kubenswrapper[4910]: I0105 23:33:53.307495 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-xf7mz" event={"ID":"d6b4354f-e670-44e7-a48e-4252a4ac68a6","Type":"ContainerStarted","Data":"993fd0ef3974644ac909154323e0e907c9fd3b2a143bd551230e7e143617bd26"} Jan 05 23:33:53 crc kubenswrapper[4910]: I0105 23:33:53.313662 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp" event={"ID":"d4f09a8f-4fb6-4720-8fb9-fb2f480e9384","Type":"ContainerStarted","Data":"c12177657b58be33d55769310de6fab3b4792b229cf40c08c16f6400a4de5244"} Jan 05 23:33:53 crc kubenswrapper[4910]: I0105 23:33:53.315735 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-7ndlc" 
event={"ID":"fd9aa314-35df-4c05-92fd-aa1127e8e80a","Type":"ContainerStarted","Data":"9af80956396b85a94f43f3a38370d41bf2286637d4177a05a1d736b8ac4ca483"} Jan 05 23:33:53 crc kubenswrapper[4910]: I0105 23:33:53.317254 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw" event={"ID":"dd44f3c6-e484-4843-8ce1-3c771202ebf0","Type":"ContainerStarted","Data":"508022e713eef824d00de3409c3b6ae97e4be96e7e8952ab592c63017fe2cd71"} Jan 05 23:33:53 crc kubenswrapper[4910]: I0105 23:33:53.322061 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-hb5xs" event={"ID":"9a834e51-2fcd-4e02-ab87-560d50993337","Type":"ContainerStarted","Data":"aae923122cb193d24a05c4e705449c99ce9525eca8b3a110ec2ff4ac9d28886f"} Jan 05 23:33:58 crc kubenswrapper[4910]: I0105 23:33:58.386000 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp" event={"ID":"d4f09a8f-4fb6-4720-8fb9-fb2f480e9384","Type":"ContainerStarted","Data":"646021f428b9e96d5af250a9238db3d91b8628c1ff3f20233fc1c5bb376fdef4"} Jan 05 23:33:58 crc kubenswrapper[4910]: I0105 23:33:58.388308 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw" event={"ID":"dd44f3c6-e484-4843-8ce1-3c771202ebf0","Type":"ContainerStarted","Data":"0ed5374680c85b59735d6f1b3b02086013ce5505104af68e0a4eb43addffedf2"} Jan 05 23:33:58 crc kubenswrapper[4910]: I0105 23:33:58.393673 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-hb5xs" event={"ID":"9a834e51-2fcd-4e02-ab87-560d50993337","Type":"ContainerStarted","Data":"ad9dbf52f3313fcfcdea636047da1119d5ec99381fe9ae1f48fbf79ef64fe30b"} Jan 05 23:33:58 crc kubenswrapper[4910]: I0105 23:33:58.395371 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-xf7mz" event={"ID":"d6b4354f-e670-44e7-a48e-4252a4ac68a6","Type":"ContainerStarted","Data":"81bda4e21747feed83d276c6079f090b3bba63e3e351fd2c407216d0b81b0c0a"} Jan 05 23:33:58 crc kubenswrapper[4910]: I0105 23:33:58.395549 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-xf7mz" Jan 05 23:33:58 crc kubenswrapper[4910]: I0105 23:33:58.416535 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp" podStartSLOduration=2.9744127049999998 podStartE2EDuration="7.416512092s" podCreationTimestamp="2026-01-05 23:33:51 +0000 UTC" firstStartedPulling="2026-01-05 23:33:52.921876035 +0000 UTC m=+6164.499373705" lastFinishedPulling="2026-01-05 23:33:57.363975422 +0000 UTC m=+6168.941473092" observedRunningTime="2026-01-05 23:33:58.403299947 +0000 UTC m=+6169.980797607" watchObservedRunningTime="2026-01-05 23:33:58.416512092 +0000 UTC m=+6169.994009772" Jan 05 23:33:58 crc kubenswrapper[4910]: I0105 23:33:58.431180 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-xf7mz" podStartSLOduration=2.191145636 podStartE2EDuration="6.431157682s" podCreationTimestamp="2026-01-05 23:33:52 +0000 UTC" firstStartedPulling="2026-01-05 23:33:53.132063919 +0000 UTC m=+6164.709561589" lastFinishedPulling="2026-01-05 23:33:57.372075965 +0000 UTC m=+6168.949573635" 
observedRunningTime="2026-01-05 23:33:58.422025604 +0000 UTC m=+6169.999523274" watchObservedRunningTime="2026-01-05 23:33:58.431157682 +0000 UTC m=+6170.008655342" Jan 05 23:33:58 crc kubenswrapper[4910]: I0105 23:33:58.458175 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-hb5xs" podStartSLOduration=2.722467666 podStartE2EDuration="7.458153186s" podCreationTimestamp="2026-01-05 23:33:51 +0000 UTC" firstStartedPulling="2026-01-05 23:33:52.63575132 +0000 UTC m=+6164.213248990" lastFinishedPulling="2026-01-05 23:33:57.37143684 +0000 UTC m=+6168.948934510" observedRunningTime="2026-01-05 23:33:58.451649121 +0000 UTC m=+6170.029146791" watchObservedRunningTime="2026-01-05 23:33:58.458153186 +0000 UTC m=+6170.035650856" Jan 05 23:33:58 crc kubenswrapper[4910]: I0105 23:33:58.476189 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw" podStartSLOduration=2.9271516699999998 podStartE2EDuration="7.476169606s" podCreationTimestamp="2026-01-05 23:33:51 +0000 UTC" firstStartedPulling="2026-01-05 23:33:52.810334355 +0000 UTC m=+6164.387832025" lastFinishedPulling="2026-01-05 23:33:57.359352291 +0000 UTC m=+6168.936849961" observedRunningTime="2026-01-05 23:33:58.467479988 +0000 UTC m=+6170.044977658" watchObservedRunningTime="2026-01-05 23:33:58.476169606 +0000 UTC m=+6170.053667266" Jan 05 23:34:02 crc kubenswrapper[4910]: I0105 23:34:02.468616 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-xf7mz" Jan 05 23:34:02 crc kubenswrapper[4910]: I0105 23:34:02.722196 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:34:02 crc kubenswrapper[4910]: E0105 23:34:02.722686 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:34:03 crc kubenswrapper[4910]: I0105 23:34:03.442981 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-7ndlc" event={"ID":"fd9aa314-35df-4c05-92fd-aa1127e8e80a","Type":"ContainerStarted","Data":"1f60ee42f6260cb44699d33dcbb73ef37519c66e73524932fb78c6e3465f7d69"} Jan 05 23:34:03 crc kubenswrapper[4910]: I0105 23:34:03.443264 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-7ndlc" Jan 05 23:34:03 crc kubenswrapper[4910]: I0105 23:34:03.445631 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-7ndlc" Jan 05 23:34:03 crc kubenswrapper[4910]: I0105 23:34:03.465684 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-59bdc8b94-7ndlc" podStartSLOduration=3.326054774 podStartE2EDuration="12.465656551s" podCreationTimestamp="2026-01-05 23:33:51 +0000 UTC" firstStartedPulling="2026-01-05 23:33:53.052508671 +0000 UTC m=+6164.630006341" lastFinishedPulling="2026-01-05 23:34:02.192110458 +0000 UTC 
m=+6173.769608118" observedRunningTime="2026-01-05 23:34:03.461962093 +0000 UTC m=+6175.039459783" watchObservedRunningTime="2026-01-05 23:34:03.465656551 +0000 UTC m=+6175.043154251" Jan 05 23:34:05 crc kubenswrapper[4910]: I0105 23:34:05.834231 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 05 23:34:05 crc kubenswrapper[4910]: I0105 23:34:05.843135 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 05 23:34:05 crc kubenswrapper[4910]: I0105 23:34:05.874736 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 05 23:34:05 crc kubenswrapper[4910]: E0105 23:34:05.875166 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f781aaf-1b6a-4c73-bfed-44881ba13710" containerName="openstackclient" Jan 05 23:34:05 crc kubenswrapper[4910]: I0105 23:34:05.875182 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f781aaf-1b6a-4c73-bfed-44881ba13710" containerName="openstackclient" Jan 05 23:34:05 crc kubenswrapper[4910]: I0105 23:34:05.875393 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f781aaf-1b6a-4c73-bfed-44881ba13710" containerName="openstackclient" Jan 05 23:34:05 crc kubenswrapper[4910]: I0105 23:34:05.876068 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 05 23:34:05 crc kubenswrapper[4910]: I0105 23:34:05.905202 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 05 23:34:05 crc kubenswrapper[4910]: I0105 23:34:05.989464 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/cb59e8ea-232f-490d-aa22-20b19bda2906-openstack-config\") pod \"openstackclient\" (UID: \"cb59e8ea-232f-490d-aa22-20b19bda2906\") " pod="openstack/openstackclient" Jan 05 23:34:05 crc kubenswrapper[4910]: I0105 23:34:05.989606 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnq94\" (UniqueName: \"kubernetes.io/projected/cb59e8ea-232f-490d-aa22-20b19bda2906-kube-api-access-jnq94\") pod \"openstackclient\" (UID: \"cb59e8ea-232f-490d-aa22-20b19bda2906\") " pod="openstack/openstackclient" Jan 05 23:34:05 crc kubenswrapper[4910]: I0105 23:34:05.989655 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/cb59e8ea-232f-490d-aa22-20b19bda2906-openstack-config-secret\") pod \"openstackclient\" (UID: \"cb59e8ea-232f-490d-aa22-20b19bda2906\") " pod="openstack/openstackclient" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.046529 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.047884 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.069755 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-4d9p8" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.071304 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.110687 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnq94\" (UniqueName: \"kubernetes.io/projected/cb59e8ea-232f-490d-aa22-20b19bda2906-kube-api-access-jnq94\") pod \"openstackclient\" (UID: \"cb59e8ea-232f-490d-aa22-20b19bda2906\") " pod="openstack/openstackclient" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.110798 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/cb59e8ea-232f-490d-aa22-20b19bda2906-openstack-config-secret\") pod \"openstackclient\" (UID: \"cb59e8ea-232f-490d-aa22-20b19bda2906\") " pod="openstack/openstackclient" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.110965 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/cb59e8ea-232f-490d-aa22-20b19bda2906-openstack-config\") pod \"openstackclient\" (UID: \"cb59e8ea-232f-490d-aa22-20b19bda2906\") " pod="openstack/openstackclient" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.112042 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/cb59e8ea-232f-490d-aa22-20b19bda2906-openstack-config\") pod \"openstackclient\" (UID: \"cb59e8ea-232f-490d-aa22-20b19bda2906\") " pod="openstack/openstackclient" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.138749 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/cb59e8ea-232f-490d-aa22-20b19bda2906-openstack-config-secret\") pod \"openstackclient\" (UID: \"cb59e8ea-232f-490d-aa22-20b19bda2906\") " pod="openstack/openstackclient" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.166794 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnq94\" (UniqueName: \"kubernetes.io/projected/cb59e8ea-232f-490d-aa22-20b19bda2906-kube-api-access-jnq94\") pod \"openstackclient\" (UID: \"cb59e8ea-232f-490d-aa22-20b19bda2906\") " pod="openstack/openstackclient" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.215340 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh4tj\" (UniqueName: \"kubernetes.io/projected/84e5ccdc-97e4-4be2-ad7d-ff34058e10c5-kube-api-access-mh4tj\") pod \"kube-state-metrics-0\" (UID: \"84e5ccdc-97e4-4be2-ad7d-ff34058e10c5\") " pod="openstack/kube-state-metrics-0" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.219663 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.326864 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh4tj\" (UniqueName: \"kubernetes.io/projected/84e5ccdc-97e4-4be2-ad7d-ff34058e10c5-kube-api-access-mh4tj\") pod \"kube-state-metrics-0\" (UID: \"84e5ccdc-97e4-4be2-ad7d-ff34058e10c5\") " pod="openstack/kube-state-metrics-0" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.348238 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh4tj\" (UniqueName: \"kubernetes.io/projected/84e5ccdc-97e4-4be2-ad7d-ff34058e10c5-kube-api-access-mh4tj\") pod \"kube-state-metrics-0\" (UID: \"84e5ccdc-97e4-4be2-ad7d-ff34058e10c5\") " pod="openstack/kube-state-metrics-0" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.393000 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.482344 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="3f781aaf-1b6a-4c73-bfed-44881ba13710" containerName="openstackclient" containerID="cri-o://c98b5115f6c62650a00bd0840adc489bbc03e6c09b09e3e89d6da607760b0f73" gracePeriod=2 Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.498359 4910 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="3f781aaf-1b6a-4c73-bfed-44881ba13710" podUID="cb59e8ea-232f-490d-aa22-20b19bda2906" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.932642 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.934949 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.966642 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-24xjv" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.966905 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.966943 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.967023 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.967169 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0" Jan 05 23:34:06 crc kubenswrapper[4910]: I0105 23:34:06.983706 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.049318 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/b23e2877-fd92-437d-91c4-97e0391e9355-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.049365 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/b23e2877-fd92-437d-91c4-97e0391e9355-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.049452 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/b23e2877-fd92-437d-91c4-97e0391e9355-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.049473 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b23e2877-fd92-437d-91c4-97e0391e9355-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.049496 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzm75\" (UniqueName: \"kubernetes.io/projected/b23e2877-fd92-437d-91c4-97e0391e9355-kube-api-access-xzm75\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.049553 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b23e2877-fd92-437d-91c4-97e0391e9355-web-config\") pod 
\"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.049572 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b23e2877-fd92-437d-91c4-97e0391e9355-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.151543 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/b23e2877-fd92-437d-91c4-97e0391e9355-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.151606 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b23e2877-fd92-437d-91c4-97e0391e9355-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.151631 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzm75\" (UniqueName: \"kubernetes.io/projected/b23e2877-fd92-437d-91c4-97e0391e9355-kube-api-access-xzm75\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.151691 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b23e2877-fd92-437d-91c4-97e0391e9355-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.151711 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b23e2877-fd92-437d-91c4-97e0391e9355-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.151762 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/b23e2877-fd92-437d-91c4-97e0391e9355-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.151780 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/b23e2877-fd92-437d-91c4-97e0391e9355-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.160661 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: 
\"kubernetes.io/empty-dir/b23e2877-fd92-437d-91c4-97e0391e9355-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.162649 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b23e2877-fd92-437d-91c4-97e0391e9355-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.175056 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b23e2877-fd92-437d-91c4-97e0391e9355-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.175300 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/b23e2877-fd92-437d-91c4-97e0391e9355-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.176382 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/b23e2877-fd92-437d-91c4-97e0391e9355-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.176678 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b23e2877-fd92-437d-91c4-97e0391e9355-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.204033 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzm75\" (UniqueName: \"kubernetes.io/projected/b23e2877-fd92-437d-91c4-97e0391e9355-kube-api-access-xzm75\") pod \"alertmanager-metric-storage-0\" (UID: \"b23e2877-fd92-437d-91c4-97e0391e9355\") " pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.230436 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.292679 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.474239 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.496988 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.520709 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.583789 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.584715 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-jvc5k" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.584839 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.585009 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.585021 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.585161 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.585314 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.587166 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.591923 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.597652 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"84e5ccdc-97e4-4be2-ad7d-ff34058e10c5","Type":"ContainerStarted","Data":"2e6f13893439abf65b4878b02a447ba10c68a262259fecb1b44d236285795eb3"} Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.687652 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/40ee7d85-f633-4577-817a-d8827050a814-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.687716 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/40ee7d85-f633-4577-817a-d8827050a814-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.687740 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/40ee7d85-f633-4577-817a-d8827050a814-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.687778 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: 
\"kubernetes.io/configmap/40ee7d85-f633-4577-817a-d8827050a814-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.687819 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrszv\" (UniqueName: \"kubernetes.io/projected/40ee7d85-f633-4577-817a-d8827050a814-kube-api-access-rrszv\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.687852 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/40ee7d85-f633-4577-817a-d8827050a814-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.687880 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/40ee7d85-f633-4577-817a-d8827050a814-config\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.687913 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/40ee7d85-f633-4577-817a-d8827050a814-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.687934 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/40ee7d85-f633-4577-817a-d8827050a814-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.687963 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-1d8c81ff-130e-4339-a877-0646eec41393\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1d8c81ff-130e-4339-a877-0646eec41393\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.791255 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/40ee7d85-f633-4577-817a-d8827050a814-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.791349 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/40ee7d85-f633-4577-817a-d8827050a814-config\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.791450 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/40ee7d85-f633-4577-817a-d8827050a814-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.791496 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/40ee7d85-f633-4577-817a-d8827050a814-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.791532 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-1d8c81ff-130e-4339-a877-0646eec41393\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1d8c81ff-130e-4339-a877-0646eec41393\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.791645 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/40ee7d85-f633-4577-817a-d8827050a814-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.791679 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/40ee7d85-f633-4577-817a-d8827050a814-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.791724 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/40ee7d85-f633-4577-817a-d8827050a814-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.791763 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/40ee7d85-f633-4577-817a-d8827050a814-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.791836 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrszv\" (UniqueName: \"kubernetes.io/projected/40ee7d85-f633-4577-817a-d8827050a814-kube-api-access-rrszv\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.798637 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/40ee7d85-f633-4577-817a-d8827050a814-config-out\") pod \"prometheus-metric-storage-0\" (UID: 
\"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.798943 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/40ee7d85-f633-4577-817a-d8827050a814-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.799163 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/40ee7d85-f633-4577-817a-d8827050a814-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.804393 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/40ee7d85-f633-4577-817a-d8827050a814-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.809471 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/40ee7d85-f633-4577-817a-d8827050a814-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.819828 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/40ee7d85-f633-4577-817a-d8827050a814-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.821650 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/40ee7d85-f633-4577-817a-d8827050a814-config\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.821685 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/40ee7d85-f633-4577-817a-d8827050a814-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.825454 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.825486 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-1d8c81ff-130e-4339-a877-0646eec41393\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1d8c81ff-130e-4339-a877-0646eec41393\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/fbf551b2423a7c26fb6a3c7bb87ffdbc734cd80742007943faeaa2a9342521fe/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:07 crc kubenswrapper[4910]: I0105 23:34:07.848172 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrszv\" (UniqueName: \"kubernetes.io/projected/40ee7d85-f633-4577-817a-d8827050a814-kube-api-access-rrszv\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:08 crc kubenswrapper[4910]: I0105 23:34:08.004285 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-1d8c81ff-130e-4339-a877-0646eec41393\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1d8c81ff-130e-4339-a877-0646eec41393\") pod \"prometheus-metric-storage-0\" (UID: \"40ee7d85-f633-4577-817a-d8827050a814\") " pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:08 crc kubenswrapper[4910]: I0105 23:34:08.021105 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 05 23:34:08 crc kubenswrapper[4910]: I0105 23:34:08.206798 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:08 crc kubenswrapper[4910]: I0105 23:34:08.629861 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"84e5ccdc-97e4-4be2-ad7d-ff34058e10c5","Type":"ContainerStarted","Data":"ecd4bde0b9a3b8f2cad8c93e189407332141f93bc1ea9d41b48a3adb0679f1dc"} Jan 05 23:34:08 crc kubenswrapper[4910]: I0105 23:34:08.630482 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 05 23:34:08 crc kubenswrapper[4910]: I0105 23:34:08.641440 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"cb59e8ea-232f-490d-aa22-20b19bda2906","Type":"ContainerStarted","Data":"8a7ce60c99292ee512ff4e3a9a018d7ea9ca8a9b573793014b58b158e96cd06d"} Jan 05 23:34:08 crc kubenswrapper[4910]: I0105 23:34:08.641485 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"cb59e8ea-232f-490d-aa22-20b19bda2906","Type":"ContainerStarted","Data":"1685307e711ca13f32bd6c4150e558f148d5680bc55849b3515071bfc7d73b5e"} Jan 05 23:34:08 crc kubenswrapper[4910]: I0105 23:34:08.675050 4910 generic.go:334] "Generic (PLEG): container finished" podID="3f781aaf-1b6a-4c73-bfed-44881ba13710" containerID="c98b5115f6c62650a00bd0840adc489bbc03e6c09b09e3e89d6da607760b0f73" exitCode=137 Jan 05 23:34:08 crc kubenswrapper[4910]: I0105 23:34:08.697200 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"b23e2877-fd92-437d-91c4-97e0391e9355","Type":"ContainerStarted","Data":"75b03713d60da8a2e09a6832c0e915707805538e94bbb51b9c6455b9c53eafd5"} Jan 05 23:34:08 crc kubenswrapper[4910]: I0105 23:34:08.726910 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/kube-state-metrics-0" podStartSLOduration=2.189692053 podStartE2EDuration="2.726883529s" podCreationTimestamp="2026-01-05 23:34:06 +0000 UTC" firstStartedPulling="2026-01-05 23:34:07.238063259 +0000 UTC m=+6178.815560939" lastFinishedPulling="2026-01-05 23:34:07.775254745 +0000 UTC m=+6179.352752415" observedRunningTime="2026-01-05 23:34:08.708770377 +0000 UTC m=+6180.286268047" watchObservedRunningTime="2026-01-05 23:34:08.726883529 +0000 UTC m=+6180.304381199" Jan 05 23:34:08 crc kubenswrapper[4910]: I0105 23:34:08.764660 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.76464245 podStartE2EDuration="3.76464245s" podCreationTimestamp="2026-01-05 23:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:34:08.759581099 +0000 UTC m=+6180.337078769" watchObservedRunningTime="2026-01-05 23:34:08.76464245 +0000 UTC m=+6180.342140120" Jan 05 23:34:09 crc kubenswrapper[4910]: I0105 23:34:09.244452 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 05 23:34:09 crc kubenswrapper[4910]: I0105 23:34:09.705988 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"40ee7d85-f633-4577-817a-d8827050a814","Type":"ContainerStarted","Data":"537b8c63fcdeabcc27384581655f5ea50ef90025f29d266477a557880fe335e2"} Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.664646 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.672142 4910 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="3f781aaf-1b6a-4c73-bfed-44881ba13710" podUID="cb59e8ea-232f-490d-aa22-20b19bda2906" Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.735585 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.739160 4910 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="3f781aaf-1b6a-4c73-bfed-44881ba13710" podUID="cb59e8ea-232f-490d-aa22-20b19bda2906" Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.748056 4910 scope.go:117] "RemoveContainer" containerID="c98b5115f6c62650a00bd0840adc489bbc03e6c09b09e3e89d6da607760b0f73" Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.776275 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3f781aaf-1b6a-4c73-bfed-44881ba13710-openstack-config-secret\") pod \"3f781aaf-1b6a-4c73-bfed-44881ba13710\" (UID: \"3f781aaf-1b6a-4c73-bfed-44881ba13710\") " Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.776400 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3f781aaf-1b6a-4c73-bfed-44881ba13710-openstack-config\") pod \"3f781aaf-1b6a-4c73-bfed-44881ba13710\" (UID: \"3f781aaf-1b6a-4c73-bfed-44881ba13710\") " Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.776466 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4d8k\" (UniqueName: \"kubernetes.io/projected/3f781aaf-1b6a-4c73-bfed-44881ba13710-kube-api-access-h4d8k\") pod \"3f781aaf-1b6a-4c73-bfed-44881ba13710\" (UID: \"3f781aaf-1b6a-4c73-bfed-44881ba13710\") " Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.785873 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f781aaf-1b6a-4c73-bfed-44881ba13710-kube-api-access-h4d8k" (OuterVolumeSpecName: "kube-api-access-h4d8k") pod "3f781aaf-1b6a-4c73-bfed-44881ba13710" (UID: "3f781aaf-1b6a-4c73-bfed-44881ba13710"). InnerVolumeSpecName "kube-api-access-h4d8k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.847363 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f781aaf-1b6a-4c73-bfed-44881ba13710-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "3f781aaf-1b6a-4c73-bfed-44881ba13710" (UID: "3f781aaf-1b6a-4c73-bfed-44881ba13710"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.856456 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f781aaf-1b6a-4c73-bfed-44881ba13710-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "3f781aaf-1b6a-4c73-bfed-44881ba13710" (UID: "3f781aaf-1b6a-4c73-bfed-44881ba13710"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.888816 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/3f781aaf-1b6a-4c73-bfed-44881ba13710-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.889059 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/3f781aaf-1b6a-4c73-bfed-44881ba13710-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:34:10 crc kubenswrapper[4910]: I0105 23:34:10.889068 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4d8k\" (UniqueName: \"kubernetes.io/projected/3f781aaf-1b6a-4c73-bfed-44881ba13710-kube-api-access-h4d8k\") on node \"crc\" DevicePath \"\"" Jan 05 23:34:11 crc kubenswrapper[4910]: I0105 23:34:11.057842 4910 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="3f781aaf-1b6a-4c73-bfed-44881ba13710" podUID="cb59e8ea-232f-490d-aa22-20b19bda2906" Jan 05 23:34:12 crc kubenswrapper[4910]: I0105 23:34:12.750395 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f781aaf-1b6a-4c73-bfed-44881ba13710" path="/var/lib/kubelet/pods/3f781aaf-1b6a-4c73-bfed-44881ba13710/volumes" Jan 05 23:34:16 crc kubenswrapper[4910]: I0105 23:34:16.458100 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 05 23:34:16 crc kubenswrapper[4910]: I0105 23:34:16.722529 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:34:16 crc kubenswrapper[4910]: E0105 23:34:16.722761 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:34:16 crc kubenswrapper[4910]: I0105 23:34:16.812051 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"40ee7d85-f633-4577-817a-d8827050a814","Type":"ContainerStarted","Data":"fdc5e8d6c7b1f7001e08d9afcbf897f9e5a14d55b0ece28137a6f682ea60ea0c"} Jan 05 23:34:16 crc kubenswrapper[4910]: I0105 23:34:16.814895 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"b23e2877-fd92-437d-91c4-97e0391e9355","Type":"ContainerStarted","Data":"01d3c457475f161f26422d5f293750b328125c13ea5f64a6657010ffb4dd7e9b"} Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.040225 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-2fjzc"] Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.048856 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-1d78-account-create-update-pxgb2"] Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.060406 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-2fjzc"] Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.068815 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell0-db-create-jtx2j"] Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.079522 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-53ac-account-create-update-pllbn"] Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.088645 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-9041-account-create-update-kjg9f"] Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.096622 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-2dz4h"] Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.103918 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-1d78-account-create-update-pxgb2"] Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.112468 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-jtx2j"] Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.121997 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-9041-account-create-update-kjg9f"] Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.130185 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-53ac-account-create-update-pllbn"] Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.137628 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-2dz4h"] Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.732819 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19df1da5-624a-4a0c-a21f-51ffcfb74941" path="/var/lib/kubelet/pods/19df1da5-624a-4a0c-a21f-51ffcfb74941/volumes" Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.734142 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="531f78d8-c1f4-44f8-873a-0808f37b1dce" path="/var/lib/kubelet/pods/531f78d8-c1f4-44f8-873a-0808f37b1dce/volumes" Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.734698 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee" path="/var/lib/kubelet/pods/5c1ffdd9-b9e0-4b3a-9592-7dc565d4c1ee/volumes" Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.735237 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8dfb60d-eef4-4e99-abda-f6a7de62bf6d" path="/var/lib/kubelet/pods/d8dfb60d-eef4-4e99-abda-f6a7de62bf6d/volumes" Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.736403 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7fec6e1-d1a5-49fb-afe2-36b2965a2049" path="/var/lib/kubelet/pods/e7fec6e1-d1a5-49fb-afe2-36b2965a2049/volumes" Jan 05 23:34:20 crc kubenswrapper[4910]: I0105 23:34:20.736905 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e95d5f5d-4fb4-47c9-ab6e-93d93949ead2" path="/var/lib/kubelet/pods/e95d5f5d-4fb4-47c9-ab6e-93d93949ead2/volumes" Jan 05 23:34:22 crc kubenswrapper[4910]: I0105 23:34:22.879333 4910 generic.go:334] "Generic (PLEG): container finished" podID="40ee7d85-f633-4577-817a-d8827050a814" containerID="fdc5e8d6c7b1f7001e08d9afcbf897f9e5a14d55b0ece28137a6f682ea60ea0c" exitCode=0 Jan 05 23:34:22 crc kubenswrapper[4910]: I0105 23:34:22.879415 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"40ee7d85-f633-4577-817a-d8827050a814","Type":"ContainerDied","Data":"fdc5e8d6c7b1f7001e08d9afcbf897f9e5a14d55b0ece28137a6f682ea60ea0c"} Jan 05 23:34:22 crc kubenswrapper[4910]: I0105 23:34:22.884050 
4910 generic.go:334] "Generic (PLEG): container finished" podID="b23e2877-fd92-437d-91c4-97e0391e9355" containerID="01d3c457475f161f26422d5f293750b328125c13ea5f64a6657010ffb4dd7e9b" exitCode=0 Jan 05 23:34:22 crc kubenswrapper[4910]: I0105 23:34:22.884103 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"b23e2877-fd92-437d-91c4-97e0391e9355","Type":"ContainerDied","Data":"01d3c457475f161f26422d5f293750b328125c13ea5f64a6657010ffb4dd7e9b"} Jan 05 23:34:26 crc kubenswrapper[4910]: I0105 23:34:26.936542 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"b23e2877-fd92-437d-91c4-97e0391e9355","Type":"ContainerStarted","Data":"4b4cd4a4603cb5f7ac808d06b8dbb448d55ff5045f6c7daac2ea7417deac1eed"} Jan 05 23:34:29 crc kubenswrapper[4910]: I0105 23:34:29.970943 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"b23e2877-fd92-437d-91c4-97e0391e9355","Type":"ContainerStarted","Data":"00d65994928e3268b120185dde9c367e6c062ae09118598b977fa62a9259d61d"} Jan 05 23:34:29 crc kubenswrapper[4910]: I0105 23:34:29.972310 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:29 crc kubenswrapper[4910]: I0105 23:34:29.974482 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0" Jan 05 23:34:30 crc kubenswrapper[4910]: I0105 23:34:30.003246 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=6.292565275 podStartE2EDuration="24.003224842s" podCreationTimestamp="2026-01-05 23:34:06 +0000 UTC" firstStartedPulling="2026-01-05 23:34:08.051942556 +0000 UTC m=+6179.629440226" lastFinishedPulling="2026-01-05 23:34:25.762602123 +0000 UTC m=+6197.340099793" observedRunningTime="2026-01-05 23:34:29.996661326 +0000 UTC m=+6201.574159056" watchObservedRunningTime="2026-01-05 23:34:30.003224842 +0000 UTC m=+6201.580722512" Jan 05 23:34:30 crc kubenswrapper[4910]: I0105 23:34:30.039285 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qqqc6"] Jan 05 23:34:30 crc kubenswrapper[4910]: I0105 23:34:30.073154 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qqqc6"] Jan 05 23:34:30 crc kubenswrapper[4910]: I0105 23:34:30.721829 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:34:30 crc kubenswrapper[4910]: E0105 23:34:30.722372 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:34:30 crc kubenswrapper[4910]: I0105 23:34:30.735901 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3f6562b-e771-4c65-a192-d4ac8412ab54" path="/var/lib/kubelet/pods/a3f6562b-e771-4c65-a192-d4ac8412ab54/volumes" Jan 05 23:34:30 crc kubenswrapper[4910]: I0105 23:34:30.987275 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"40ee7d85-f633-4577-817a-d8827050a814","Type":"ContainerStarted","Data":"37c3c298413cdaa79bfe979b34527473f666ae9af0ab68c5e3fe06f9eda198af"} Jan 05 23:34:37 crc kubenswrapper[4910]: I0105 23:34:37.064953 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"40ee7d85-f633-4577-817a-d8827050a814","Type":"ContainerStarted","Data":"9e1bbf85be83b48e6ac40b525331b4d5c229468010819381fb5c3206dd5205c5"} Jan 05 23:34:41 crc kubenswrapper[4910]: I0105 23:34:41.209876 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"40ee7d85-f633-4577-817a-d8827050a814","Type":"ContainerStarted","Data":"63c06c43e9d116a2c9896aaf31bc44e5c6490fcf917bd502306ae50cb5561777"} Jan 05 23:34:41 crc kubenswrapper[4910]: I0105 23:34:41.244950 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=4.195120408 podStartE2EDuration="35.244921058s" podCreationTimestamp="2026-01-05 23:34:06 +0000 UTC" firstStartedPulling="2026-01-05 23:34:09.283101099 +0000 UTC m=+6180.860598769" lastFinishedPulling="2026-01-05 23:34:40.332901739 +0000 UTC m=+6211.910399419" observedRunningTime="2026-01-05 23:34:41.240256707 +0000 UTC m=+6212.817754437" watchObservedRunningTime="2026-01-05 23:34:41.244921058 +0000 UTC m=+6212.822418738" Jan 05 23:34:43 crc kubenswrapper[4910]: I0105 23:34:43.207985 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:43 crc kubenswrapper[4910]: I0105 23:34:43.722906 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:34:43 crc kubenswrapper[4910]: E0105 23:34:43.723899 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.694135 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.697487 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.700016 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.714567 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.715458 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.838468 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdpsx\" (UniqueName: \"kubernetes.io/projected/b448d106-d07f-4d39-ba3f-3b8904ef7baf-kube-api-access-bdpsx\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.838576 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-scripts\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.838604 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-config-data\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.838647 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.838936 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b448d106-d07f-4d39-ba3f-3b8904ef7baf-log-httpd\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.838983 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.839204 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b448d106-d07f-4d39-ba3f-3b8904ef7baf-run-httpd\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.941494 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b448d106-d07f-4d39-ba3f-3b8904ef7baf-log-httpd\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.941536 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.941601 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b448d106-d07f-4d39-ba3f-3b8904ef7baf-run-httpd\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.941669 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdpsx\" (UniqueName: \"kubernetes.io/projected/b448d106-d07f-4d39-ba3f-3b8904ef7baf-kube-api-access-bdpsx\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.941714 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-scripts\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.941730 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-config-data\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.941756 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.942267 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b448d106-d07f-4d39-ba3f-3b8904ef7baf-log-httpd\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.942345 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b448d106-d07f-4d39-ba3f-3b8904ef7baf-run-httpd\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.948375 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-config-data\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.950552 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.953066 4910 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-scripts\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.959293 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:46 crc kubenswrapper[4910]: I0105 23:34:46.965412 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdpsx\" (UniqueName: \"kubernetes.io/projected/b448d106-d07f-4d39-ba3f-3b8904ef7baf-kube-api-access-bdpsx\") pod \"ceilometer-0\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " pod="openstack/ceilometer-0" Jan 05 23:34:47 crc kubenswrapper[4910]: I0105 23:34:47.015731 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 23:34:47 crc kubenswrapper[4910]: I0105 23:34:47.532643 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:34:47 crc kubenswrapper[4910]: I0105 23:34:47.728775 4910 scope.go:117] "RemoveContainer" containerID="073856c0d67c155e57154f42bd7c12e2c86e471b010f40848bf1ebb6ae9e9e30" Jan 05 23:34:47 crc kubenswrapper[4910]: I0105 23:34:47.774473 4910 scope.go:117] "RemoveContainer" containerID="f0b1375ad573ba32072197e2efbf5d0227f025dd09b18f47a2f36ca927bd609b" Jan 05 23:34:47 crc kubenswrapper[4910]: I0105 23:34:47.817970 4910 scope.go:117] "RemoveContainer" containerID="c947c2a056ee6f7110a8c07485a5dcb761bff4a7ae92b78f95e34531366f2aaa" Jan 05 23:34:47 crc kubenswrapper[4910]: I0105 23:34:47.844786 4910 scope.go:117] "RemoveContainer" containerID="8e37bac4e5fde22fcb4959bed1467d329ca4527e73c3b3986bd31942a9439093" Jan 05 23:34:47 crc kubenswrapper[4910]: I0105 23:34:47.863878 4910 scope.go:117] "RemoveContainer" containerID="35f32c545e0ce39a7424bf29abc17ae9bc44202aae5316fe585acab249b0b55e" Jan 05 23:34:47 crc kubenswrapper[4910]: I0105 23:34:47.884823 4910 scope.go:117] "RemoveContainer" containerID="412790b1c5b4cbd811e41a4cf74d8b0ce15938128188cd6dbed27b1e74e7d589" Jan 05 23:34:47 crc kubenswrapper[4910]: I0105 23:34:47.906746 4910 scope.go:117] "RemoveContainer" containerID="ad6f4b7dacbc8ca936f715152f6fb568538c029cf558061706a109206cbb2cd1" Jan 05 23:34:48 crc kubenswrapper[4910]: I0105 23:34:48.283079 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b448d106-d07f-4d39-ba3f-3b8904ef7baf","Type":"ContainerStarted","Data":"0bcdcb0ea3f70c9a4f5d2731001de4a0d7eecb7eec67f6ed757b44e94ba62312"} Jan 05 23:34:49 crc kubenswrapper[4910]: I0105 23:34:49.054101 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xgtkn"] Jan 05 23:34:49 crc kubenswrapper[4910]: I0105 23:34:49.064331 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xgtkn"] Jan 05 23:34:49 crc kubenswrapper[4910]: I0105 23:34:49.293080 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b448d106-d07f-4d39-ba3f-3b8904ef7baf","Type":"ContainerStarted","Data":"db853bf3b3c0441fde93e0dfb51ade8de1d1bbb66119c67220f6ffa824e6e5cb"} Jan 05 23:34:50 crc kubenswrapper[4910]: I0105 23:34:50.031152 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell0-cell-mapping-7j6xw"] Jan 05 23:34:50 crc kubenswrapper[4910]: I0105 23:34:50.039905 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-7j6xw"] Jan 05 23:34:50 crc kubenswrapper[4910]: I0105 23:34:50.304707 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b448d106-d07f-4d39-ba3f-3b8904ef7baf","Type":"ContainerStarted","Data":"341785a368782f7a4ae4b670d6b18bd1735a5957dfe50035ef849aa7f2b5c1bf"} Jan 05 23:34:50 crc kubenswrapper[4910]: I0105 23:34:50.305060 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b448d106-d07f-4d39-ba3f-3b8904ef7baf","Type":"ContainerStarted","Data":"ba737a9e701e201433ff1810e7f1e1a74e1d07465ae371169f375befac870a92"} Jan 05 23:34:50 crc kubenswrapper[4910]: I0105 23:34:50.734088 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b03dbb9-5f91-445d-8413-af5468bac3a4" path="/var/lib/kubelet/pods/2b03dbb9-5f91-445d-8413-af5468bac3a4/volumes" Jan 05 23:34:50 crc kubenswrapper[4910]: I0105 23:34:50.735309 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="987fcce4-1c3a-4ffb-b340-65abf751215a" path="/var/lib/kubelet/pods/987fcce4-1c3a-4ffb-b340-65abf751215a/volumes" Jan 05 23:34:52 crc kubenswrapper[4910]: I0105 23:34:52.340955 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b448d106-d07f-4d39-ba3f-3b8904ef7baf","Type":"ContainerStarted","Data":"bf744520b2a5567cdbf53e2874c9fa07164e3fec9ac16b4055044431b58d7417"} Jan 05 23:34:52 crc kubenswrapper[4910]: I0105 23:34:52.341592 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 05 23:34:52 crc kubenswrapper[4910]: I0105 23:34:52.376581 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.561886521 podStartE2EDuration="6.376559718s" podCreationTimestamp="2026-01-05 23:34:46 +0000 UTC" firstStartedPulling="2026-01-05 23:34:47.570305844 +0000 UTC m=+6219.147803524" lastFinishedPulling="2026-01-05 23:34:51.384979051 +0000 UTC m=+6222.962476721" observedRunningTime="2026-01-05 23:34:52.372640384 +0000 UTC m=+6223.950138074" watchObservedRunningTime="2026-01-05 23:34:52.376559718 +0000 UTC m=+6223.954057388" Jan 05 23:34:53 crc kubenswrapper[4910]: I0105 23:34:53.207870 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:53 crc kubenswrapper[4910]: I0105 23:34:53.214293 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:53 crc kubenswrapper[4910]: I0105 23:34:53.354871 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 05 23:34:55 crc kubenswrapper[4910]: I0105 23:34:55.721879 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:34:55 crc kubenswrapper[4910]: E0105 23:34:55.722648 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" 
podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:34:58 crc kubenswrapper[4910]: I0105 23:34:58.770970 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-f99t2"] Jan 05 23:34:58 crc kubenswrapper[4910]: I0105 23:34:58.773437 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-f99t2" Jan 05 23:34:58 crc kubenswrapper[4910]: I0105 23:34:58.783959 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-f99t2"] Jan 05 23:34:58 crc kubenswrapper[4910]: I0105 23:34:58.813204 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79b2932f-d96b-45db-bea6-e821af5a8388-operator-scripts\") pod \"aodh-db-create-f99t2\" (UID: \"79b2932f-d96b-45db-bea6-e821af5a8388\") " pod="openstack/aodh-db-create-f99t2" Jan 05 23:34:58 crc kubenswrapper[4910]: I0105 23:34:58.813340 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47f5z\" (UniqueName: \"kubernetes.io/projected/79b2932f-d96b-45db-bea6-e821af5a8388-kube-api-access-47f5z\") pod \"aodh-db-create-f99t2\" (UID: \"79b2932f-d96b-45db-bea6-e821af5a8388\") " pod="openstack/aodh-db-create-f99t2" Jan 05 23:34:58 crc kubenswrapper[4910]: I0105 23:34:58.914958 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47f5z\" (UniqueName: \"kubernetes.io/projected/79b2932f-d96b-45db-bea6-e821af5a8388-kube-api-access-47f5z\") pod \"aodh-db-create-f99t2\" (UID: \"79b2932f-d96b-45db-bea6-e821af5a8388\") " pod="openstack/aodh-db-create-f99t2" Jan 05 23:34:58 crc kubenswrapper[4910]: I0105 23:34:58.915102 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79b2932f-d96b-45db-bea6-e821af5a8388-operator-scripts\") pod \"aodh-db-create-f99t2\" (UID: \"79b2932f-d96b-45db-bea6-e821af5a8388\") " pod="openstack/aodh-db-create-f99t2" Jan 05 23:34:58 crc kubenswrapper[4910]: I0105 23:34:58.915909 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79b2932f-d96b-45db-bea6-e821af5a8388-operator-scripts\") pod \"aodh-db-create-f99t2\" (UID: \"79b2932f-d96b-45db-bea6-e821af5a8388\") " pod="openstack/aodh-db-create-f99t2" Jan 05 23:34:58 crc kubenswrapper[4910]: I0105 23:34:58.944826 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47f5z\" (UniqueName: \"kubernetes.io/projected/79b2932f-d96b-45db-bea6-e821af5a8388-kube-api-access-47f5z\") pod \"aodh-db-create-f99t2\" (UID: \"79b2932f-d96b-45db-bea6-e821af5a8388\") " pod="openstack/aodh-db-create-f99t2" Jan 05 23:34:58 crc kubenswrapper[4910]: I0105 23:34:58.975538 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-5735-account-create-update-h89vj"] Jan 05 23:34:58 crc kubenswrapper[4910]: I0105 23:34:58.976932 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-5735-account-create-update-h89vj" Jan 05 23:34:58 crc kubenswrapper[4910]: I0105 23:34:58.988884 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Jan 05 23:34:58 crc kubenswrapper[4910]: I0105 23:34:58.990917 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-5735-account-create-update-h89vj"] Jan 05 23:34:59 crc kubenswrapper[4910]: I0105 23:34:59.016967 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7fwp\" (UniqueName: \"kubernetes.io/projected/36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8-kube-api-access-h7fwp\") pod \"aodh-5735-account-create-update-h89vj\" (UID: \"36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8\") " pod="openstack/aodh-5735-account-create-update-h89vj" Jan 05 23:34:59 crc kubenswrapper[4910]: I0105 23:34:59.017033 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8-operator-scripts\") pod \"aodh-5735-account-create-update-h89vj\" (UID: \"36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8\") " pod="openstack/aodh-5735-account-create-update-h89vj" Jan 05 23:34:59 crc kubenswrapper[4910]: I0105 23:34:59.093449 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-f99t2" Jan 05 23:34:59 crc kubenswrapper[4910]: I0105 23:34:59.118833 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8-operator-scripts\") pod \"aodh-5735-account-create-update-h89vj\" (UID: \"36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8\") " pod="openstack/aodh-5735-account-create-update-h89vj" Jan 05 23:34:59 crc kubenswrapper[4910]: I0105 23:34:59.119573 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8-operator-scripts\") pod \"aodh-5735-account-create-update-h89vj\" (UID: \"36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8\") " pod="openstack/aodh-5735-account-create-update-h89vj" Jan 05 23:34:59 crc kubenswrapper[4910]: I0105 23:34:59.119805 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7fwp\" (UniqueName: \"kubernetes.io/projected/36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8-kube-api-access-h7fwp\") pod \"aodh-5735-account-create-update-h89vj\" (UID: \"36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8\") " pod="openstack/aodh-5735-account-create-update-h89vj" Jan 05 23:34:59 crc kubenswrapper[4910]: I0105 23:34:59.142655 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7fwp\" (UniqueName: \"kubernetes.io/projected/36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8-kube-api-access-h7fwp\") pod \"aodh-5735-account-create-update-h89vj\" (UID: \"36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8\") " pod="openstack/aodh-5735-account-create-update-h89vj" Jan 05 23:34:59 crc kubenswrapper[4910]: I0105 23:34:59.324651 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-5735-account-create-update-h89vj" Jan 05 23:34:59 crc kubenswrapper[4910]: I0105 23:34:59.599146 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-f99t2"] Jan 05 23:34:59 crc kubenswrapper[4910]: W0105 23:34:59.600961 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod79b2932f_d96b_45db_bea6_e821af5a8388.slice/crio-e8a3ce43e92c0831941c3d20bd408adb98dc3a1a515af8605875c1eb16ba11d9 WatchSource:0}: Error finding container e8a3ce43e92c0831941c3d20bd408adb98dc3a1a515af8605875c1eb16ba11d9: Status 404 returned error can't find the container with id e8a3ce43e92c0831941c3d20bd408adb98dc3a1a515af8605875c1eb16ba11d9 Jan 05 23:34:59 crc kubenswrapper[4910]: I0105 23:34:59.815315 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-5735-account-create-update-h89vj"] Jan 05 23:34:59 crc kubenswrapper[4910]: W0105 23:34:59.817758 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod36c7e70d_6b03_4cf9_9ef8_a1afa8af62c8.slice/crio-b06ab71b058a964479a16305cbd9c73a1f18ac32fa4c4919b59949d7af8e94c6 WatchSource:0}: Error finding container b06ab71b058a964479a16305cbd9c73a1f18ac32fa4c4919b59949d7af8e94c6: Status 404 returned error can't find the container with id b06ab71b058a964479a16305cbd9c73a1f18ac32fa4c4919b59949d7af8e94c6 Jan 05 23:35:00 crc kubenswrapper[4910]: I0105 23:35:00.436453 4910 generic.go:334] "Generic (PLEG): container finished" podID="79b2932f-d96b-45db-bea6-e821af5a8388" containerID="43d4d33eb21111d88b658abbd42893a7e3e86c9ae6f69d028289560f18a8e5b8" exitCode=0 Jan 05 23:35:00 crc kubenswrapper[4910]: I0105 23:35:00.436593 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-f99t2" event={"ID":"79b2932f-d96b-45db-bea6-e821af5a8388","Type":"ContainerDied","Data":"43d4d33eb21111d88b658abbd42893a7e3e86c9ae6f69d028289560f18a8e5b8"} Jan 05 23:35:00 crc kubenswrapper[4910]: I0105 23:35:00.436636 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-f99t2" event={"ID":"79b2932f-d96b-45db-bea6-e821af5a8388","Type":"ContainerStarted","Data":"e8a3ce43e92c0831941c3d20bd408adb98dc3a1a515af8605875c1eb16ba11d9"} Jan 05 23:35:00 crc kubenswrapper[4910]: I0105 23:35:00.441081 4910 generic.go:334] "Generic (PLEG): container finished" podID="36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8" containerID="b429ebe7e9a6cd4ee878dc5d7803620fe414fc2f451430fad629ac4f3937e289" exitCode=0 Jan 05 23:35:00 crc kubenswrapper[4910]: I0105 23:35:00.441143 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-5735-account-create-update-h89vj" event={"ID":"36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8","Type":"ContainerDied","Data":"b429ebe7e9a6cd4ee878dc5d7803620fe414fc2f451430fad629ac4f3937e289"} Jan 05 23:35:00 crc kubenswrapper[4910]: I0105 23:35:00.441171 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-5735-account-create-update-h89vj" event={"ID":"36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8","Type":"ContainerStarted","Data":"b06ab71b058a964479a16305cbd9c73a1f18ac32fa4c4919b59949d7af8e94c6"} Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.090441 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-f99t2" Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.095395 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-5735-account-create-update-h89vj" Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.285499 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8-operator-scripts\") pod \"36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8\" (UID: \"36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8\") " Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.285581 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47f5z\" (UniqueName: \"kubernetes.io/projected/79b2932f-d96b-45db-bea6-e821af5a8388-kube-api-access-47f5z\") pod \"79b2932f-d96b-45db-bea6-e821af5a8388\" (UID: \"79b2932f-d96b-45db-bea6-e821af5a8388\") " Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.285620 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7fwp\" (UniqueName: \"kubernetes.io/projected/36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8-kube-api-access-h7fwp\") pod \"36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8\" (UID: \"36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8\") " Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.285658 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79b2932f-d96b-45db-bea6-e821af5a8388-operator-scripts\") pod \"79b2932f-d96b-45db-bea6-e821af5a8388\" (UID: \"79b2932f-d96b-45db-bea6-e821af5a8388\") " Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.286531 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8" (UID: "36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.286750 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79b2932f-d96b-45db-bea6-e821af5a8388-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "79b2932f-d96b-45db-bea6-e821af5a8388" (UID: "79b2932f-d96b-45db-bea6-e821af5a8388"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.292219 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79b2932f-d96b-45db-bea6-e821af5a8388-kube-api-access-47f5z" (OuterVolumeSpecName: "kube-api-access-47f5z") pod "79b2932f-d96b-45db-bea6-e821af5a8388" (UID: "79b2932f-d96b-45db-bea6-e821af5a8388"). InnerVolumeSpecName "kube-api-access-47f5z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.292282 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8-kube-api-access-h7fwp" (OuterVolumeSpecName: "kube-api-access-h7fwp") pod "36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8" (UID: "36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8"). InnerVolumeSpecName "kube-api-access-h7fwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.388476 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.388788 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47f5z\" (UniqueName: \"kubernetes.io/projected/79b2932f-d96b-45db-bea6-e821af5a8388-kube-api-access-47f5z\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.388798 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7fwp\" (UniqueName: \"kubernetes.io/projected/36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8-kube-api-access-h7fwp\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.388811 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79b2932f-d96b-45db-bea6-e821af5a8388-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.460213 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-f99t2" Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.460266 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-f99t2" event={"ID":"79b2932f-d96b-45db-bea6-e821af5a8388","Type":"ContainerDied","Data":"e8a3ce43e92c0831941c3d20bd408adb98dc3a1a515af8605875c1eb16ba11d9"} Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.460318 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e8a3ce43e92c0831941c3d20bd408adb98dc3a1a515af8605875c1eb16ba11d9" Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.463016 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-5735-account-create-update-h89vj" event={"ID":"36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8","Type":"ContainerDied","Data":"b06ab71b058a964479a16305cbd9c73a1f18ac32fa4c4919b59949d7af8e94c6"} Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.463048 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b06ab71b058a964479a16305cbd9c73a1f18ac32fa4c4919b59949d7af8e94c6" Jan 05 23:35:02 crc kubenswrapper[4910]: I0105 23:35:02.463112 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-5735-account-create-update-h89vj" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.433553 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-2w8sc"] Jan 05 23:35:04 crc kubenswrapper[4910]: E0105 23:35:04.434382 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8" containerName="mariadb-account-create-update" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.434402 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8" containerName="mariadb-account-create-update" Jan 05 23:35:04 crc kubenswrapper[4910]: E0105 23:35:04.434431 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79b2932f-d96b-45db-bea6-e821af5a8388" containerName="mariadb-database-create" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.434442 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="79b2932f-d96b-45db-bea6-e821af5a8388" containerName="mariadb-database-create" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.434715 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8" containerName="mariadb-account-create-update" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.434749 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="79b2932f-d96b-45db-bea6-e821af5a8388" containerName="mariadb-database-create" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.435697 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.440909 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.441406 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.441407 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-pnrzn" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.441689 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.449269 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-2w8sc"] Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.541401 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-combined-ca-bundle\") pod \"aodh-db-sync-2w8sc\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.541754 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ltq9\" (UniqueName: \"kubernetes.io/projected/b2ff657b-9e47-472a-9ff1-eda124dd4db8-kube-api-access-4ltq9\") pod \"aodh-db-sync-2w8sc\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.541836 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-scripts\") pod 
\"aodh-db-sync-2w8sc\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.541853 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-config-data\") pod \"aodh-db-sync-2w8sc\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.643178 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-combined-ca-bundle\") pod \"aodh-db-sync-2w8sc\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.643247 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ltq9\" (UniqueName: \"kubernetes.io/projected/b2ff657b-9e47-472a-9ff1-eda124dd4db8-kube-api-access-4ltq9\") pod \"aodh-db-sync-2w8sc\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.643322 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-scripts\") pod \"aodh-db-sync-2w8sc\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.643343 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-config-data\") pod \"aodh-db-sync-2w8sc\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.651445 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-combined-ca-bundle\") pod \"aodh-db-sync-2w8sc\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.651900 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-config-data\") pod \"aodh-db-sync-2w8sc\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.654419 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-scripts\") pod \"aodh-db-sync-2w8sc\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.669735 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ltq9\" (UniqueName: \"kubernetes.io/projected/b2ff657b-9e47-472a-9ff1-eda124dd4db8-kube-api-access-4ltq9\") pod \"aodh-db-sync-2w8sc\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:04 crc kubenswrapper[4910]: I0105 23:35:04.796435 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:05 crc kubenswrapper[4910]: I0105 23:35:05.270677 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-2w8sc"] Jan 05 23:35:05 crc kubenswrapper[4910]: W0105 23:35:05.271817 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2ff657b_9e47_472a_9ff1_eda124dd4db8.slice/crio-28250487fbfb84f867f118150f02444110f98e608e2fd19125286dc76eb0f33e WatchSource:0}: Error finding container 28250487fbfb84f867f118150f02444110f98e608e2fd19125286dc76eb0f33e: Status 404 returned error can't find the container with id 28250487fbfb84f867f118150f02444110f98e608e2fd19125286dc76eb0f33e Jan 05 23:35:05 crc kubenswrapper[4910]: I0105 23:35:05.497347 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-2w8sc" event={"ID":"b2ff657b-9e47-472a-9ff1-eda124dd4db8","Type":"ContainerStarted","Data":"28250487fbfb84f867f118150f02444110f98e608e2fd19125286dc76eb0f33e"} Jan 05 23:35:06 crc kubenswrapper[4910]: I0105 23:35:06.722852 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:35:06 crc kubenswrapper[4910]: E0105 23:35:06.723499 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:35:08 crc kubenswrapper[4910]: I0105 23:35:08.049167 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-4qctf"] Jan 05 23:35:08 crc kubenswrapper[4910]: I0105 23:35:08.058699 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-4qctf"] Jan 05 23:35:08 crc kubenswrapper[4910]: I0105 23:35:08.736036 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ef66051-8f95-4f1c-96dc-608d8a5edcfa" path="/var/lib/kubelet/pods/8ef66051-8f95-4f1c-96dc-608d8a5edcfa/volumes" Jan 05 23:35:10 crc kubenswrapper[4910]: I0105 23:35:10.566577 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-2w8sc" event={"ID":"b2ff657b-9e47-472a-9ff1-eda124dd4db8","Type":"ContainerStarted","Data":"ee51b91ed8b3f0c48535ba0eee3cf31112722e5a2b1ea3521a46688075f1c2d8"} Jan 05 23:35:10 crc kubenswrapper[4910]: I0105 23:35:10.597958 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-2w8sc" podStartSLOduration=2.016672281 podStartE2EDuration="6.597938918s" podCreationTimestamp="2026-01-05 23:35:04 +0000 UTC" firstStartedPulling="2026-01-05 23:35:05.274619408 +0000 UTC m=+6236.852117078" lastFinishedPulling="2026-01-05 23:35:09.855886045 +0000 UTC m=+6241.433383715" observedRunningTime="2026-01-05 23:35:10.586466075 +0000 UTC m=+6242.163963745" watchObservedRunningTime="2026-01-05 23:35:10.597938918 +0000 UTC m=+6242.175436588" Jan 05 23:35:13 crc kubenswrapper[4910]: I0105 23:35:13.602702 4910 generic.go:334] "Generic (PLEG): container finished" podID="b2ff657b-9e47-472a-9ff1-eda124dd4db8" containerID="ee51b91ed8b3f0c48535ba0eee3cf31112722e5a2b1ea3521a46688075f1c2d8" exitCode=0 Jan 05 23:35:13 crc kubenswrapper[4910]: I0105 23:35:13.602755 
4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-2w8sc" event={"ID":"b2ff657b-9e47-472a-9ff1-eda124dd4db8","Type":"ContainerDied","Data":"ee51b91ed8b3f0c48535ba0eee3cf31112722e5a2b1ea3521a46688075f1c2d8"} Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.003959 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.116222 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-combined-ca-bundle\") pod \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.116771 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-config-data\") pod \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.116816 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ltq9\" (UniqueName: \"kubernetes.io/projected/b2ff657b-9e47-472a-9ff1-eda124dd4db8-kube-api-access-4ltq9\") pod \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.116866 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-scripts\") pod \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\" (UID: \"b2ff657b-9e47-472a-9ff1-eda124dd4db8\") " Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.122642 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2ff657b-9e47-472a-9ff1-eda124dd4db8-kube-api-access-4ltq9" (OuterVolumeSpecName: "kube-api-access-4ltq9") pod "b2ff657b-9e47-472a-9ff1-eda124dd4db8" (UID: "b2ff657b-9e47-472a-9ff1-eda124dd4db8"). InnerVolumeSpecName "kube-api-access-4ltq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.123707 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-scripts" (OuterVolumeSpecName: "scripts") pod "b2ff657b-9e47-472a-9ff1-eda124dd4db8" (UID: "b2ff657b-9e47-472a-9ff1-eda124dd4db8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.145045 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b2ff657b-9e47-472a-9ff1-eda124dd4db8" (UID: "b2ff657b-9e47-472a-9ff1-eda124dd4db8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.151332 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-config-data" (OuterVolumeSpecName: "config-data") pod "b2ff657b-9e47-472a-9ff1-eda124dd4db8" (UID: "b2ff657b-9e47-472a-9ff1-eda124dd4db8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.219705 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.219767 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.219788 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ltq9\" (UniqueName: \"kubernetes.io/projected/b2ff657b-9e47-472a-9ff1-eda124dd4db8-kube-api-access-4ltq9\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.219808 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2ff657b-9e47-472a-9ff1-eda124dd4db8-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.635116 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-2w8sc" event={"ID":"b2ff657b-9e47-472a-9ff1-eda124dd4db8","Type":"ContainerDied","Data":"28250487fbfb84f867f118150f02444110f98e608e2fd19125286dc76eb0f33e"} Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.635552 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28250487fbfb84f867f118150f02444110f98e608e2fd19125286dc76eb0f33e" Jan 05 23:35:15 crc kubenswrapper[4910]: I0105 23:35:15.635792 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-2w8sc" Jan 05 23:35:17 crc kubenswrapper[4910]: I0105 23:35:17.029358 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 05 23:35:18 crc kubenswrapper[4910]: I0105 23:35:18.860686 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 05 23:35:18 crc kubenswrapper[4910]: E0105 23:35:18.862210 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2ff657b-9e47-472a-9ff1-eda124dd4db8" containerName="aodh-db-sync" Jan 05 23:35:18 crc kubenswrapper[4910]: I0105 23:35:18.862239 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2ff657b-9e47-472a-9ff1-eda124dd4db8" containerName="aodh-db-sync" Jan 05 23:35:18 crc kubenswrapper[4910]: I0105 23:35:18.862687 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2ff657b-9e47-472a-9ff1-eda124dd4db8" containerName="aodh-db-sync" Jan 05 23:35:18 crc kubenswrapper[4910]: I0105 23:35:18.866297 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 05 23:35:18 crc kubenswrapper[4910]: I0105 23:35:18.871374 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-pnrzn" Jan 05 23:35:18 crc kubenswrapper[4910]: I0105 23:35:18.871710 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 05 23:35:18 crc kubenswrapper[4910]: I0105 23:35:18.872048 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 05 23:35:18 crc kubenswrapper[4910]: I0105 23:35:18.883390 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 05 23:35:18 crc kubenswrapper[4910]: I0105 23:35:18.927377 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6de8dea8-3e8e-4b14-8b24-51ce2a7952b2-combined-ca-bundle\") pod \"aodh-0\" (UID: \"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2\") " pod="openstack/aodh-0" Jan 05 23:35:18 crc kubenswrapper[4910]: I0105 23:35:18.927438 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rl8c6\" (UniqueName: \"kubernetes.io/projected/6de8dea8-3e8e-4b14-8b24-51ce2a7952b2-kube-api-access-rl8c6\") pod \"aodh-0\" (UID: \"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2\") " pod="openstack/aodh-0" Jan 05 23:35:18 crc kubenswrapper[4910]: I0105 23:35:18.927616 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6de8dea8-3e8e-4b14-8b24-51ce2a7952b2-scripts\") pod \"aodh-0\" (UID: \"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2\") " pod="openstack/aodh-0" Jan 05 23:35:18 crc kubenswrapper[4910]: I0105 23:35:18.927747 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6de8dea8-3e8e-4b14-8b24-51ce2a7952b2-config-data\") pod \"aodh-0\" (UID: \"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2\") " pod="openstack/aodh-0" Jan 05 23:35:19 crc kubenswrapper[4910]: I0105 23:35:19.030086 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6de8dea8-3e8e-4b14-8b24-51ce2a7952b2-combined-ca-bundle\") pod \"aodh-0\" (UID: \"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2\") " pod="openstack/aodh-0" Jan 05 23:35:19 crc kubenswrapper[4910]: I0105 23:35:19.030163 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rl8c6\" (UniqueName: \"kubernetes.io/projected/6de8dea8-3e8e-4b14-8b24-51ce2a7952b2-kube-api-access-rl8c6\") pod \"aodh-0\" (UID: \"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2\") " pod="openstack/aodh-0" Jan 05 23:35:19 crc kubenswrapper[4910]: I0105 23:35:19.030229 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6de8dea8-3e8e-4b14-8b24-51ce2a7952b2-scripts\") pod \"aodh-0\" (UID: \"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2\") " pod="openstack/aodh-0" Jan 05 23:35:19 crc kubenswrapper[4910]: I0105 23:35:19.030281 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6de8dea8-3e8e-4b14-8b24-51ce2a7952b2-config-data\") pod \"aodh-0\" (UID: \"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2\") " pod="openstack/aodh-0" Jan 05 23:35:19 crc kubenswrapper[4910]: 
I0105 23:35:19.037398 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6de8dea8-3e8e-4b14-8b24-51ce2a7952b2-scripts\") pod \"aodh-0\" (UID: \"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2\") " pod="openstack/aodh-0" Jan 05 23:35:19 crc kubenswrapper[4910]: I0105 23:35:19.037896 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6de8dea8-3e8e-4b14-8b24-51ce2a7952b2-config-data\") pod \"aodh-0\" (UID: \"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2\") " pod="openstack/aodh-0" Jan 05 23:35:19 crc kubenswrapper[4910]: I0105 23:35:19.048007 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6de8dea8-3e8e-4b14-8b24-51ce2a7952b2-combined-ca-bundle\") pod \"aodh-0\" (UID: \"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2\") " pod="openstack/aodh-0" Jan 05 23:35:19 crc kubenswrapper[4910]: I0105 23:35:19.048009 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rl8c6\" (UniqueName: \"kubernetes.io/projected/6de8dea8-3e8e-4b14-8b24-51ce2a7952b2-kube-api-access-rl8c6\") pod \"aodh-0\" (UID: \"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2\") " pod="openstack/aodh-0" Jan 05 23:35:19 crc kubenswrapper[4910]: I0105 23:35:19.203615 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 05 23:35:19 crc kubenswrapper[4910]: I0105 23:35:19.717025 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 05 23:35:19 crc kubenswrapper[4910]: W0105 23:35:19.718156 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6de8dea8_3e8e_4b14_8b24_51ce2a7952b2.slice/crio-87573eb94c71efe5f8050ed8732e2311f557f4202ab29cb744602f4ea421ce03 WatchSource:0}: Error finding container 87573eb94c71efe5f8050ed8732e2311f557f4202ab29cb744602f4ea421ce03: Status 404 returned error can't find the container with id 87573eb94c71efe5f8050ed8732e2311f557f4202ab29cb744602f4ea421ce03 Jan 05 23:35:20 crc kubenswrapper[4910]: I0105 23:35:20.692444 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2","Type":"ContainerStarted","Data":"fe43f942795a2458bdc53852eb35bd61262de9ed5f783887ccab4f406dd3735e"} Jan 05 23:35:20 crc kubenswrapper[4910]: I0105 23:35:20.692890 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2","Type":"ContainerStarted","Data":"87573eb94c71efe5f8050ed8732e2311f557f4202ab29cb744602f4ea421ce03"} Jan 05 23:35:21 crc kubenswrapper[4910]: I0105 23:35:21.244222 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:35:21 crc kubenswrapper[4910]: I0105 23:35:21.244495 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="ceilometer-central-agent" containerID="cri-o://db853bf3b3c0441fde93e0dfb51ade8de1d1bbb66119c67220f6ffa824e6e5cb" gracePeriod=30 Jan 05 23:35:21 crc kubenswrapper[4910]: I0105 23:35:21.244657 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="proxy-httpd" 
containerID="cri-o://bf744520b2a5567cdbf53e2874c9fa07164e3fec9ac16b4055044431b58d7417" gracePeriod=30 Jan 05 23:35:21 crc kubenswrapper[4910]: I0105 23:35:21.244761 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="ceilometer-notification-agent" containerID="cri-o://ba737a9e701e201433ff1810e7f1e1a74e1d07465ae371169f375befac870a92" gracePeriod=30 Jan 05 23:35:21 crc kubenswrapper[4910]: I0105 23:35:21.244811 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="sg-core" containerID="cri-o://341785a368782f7a4ae4b670d6b18bd1735a5957dfe50035ef849aa7f2b5c1bf" gracePeriod=30 Jan 05 23:35:21 crc kubenswrapper[4910]: I0105 23:35:21.706262 4910 generic.go:334] "Generic (PLEG): container finished" podID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerID="bf744520b2a5567cdbf53e2874c9fa07164e3fec9ac16b4055044431b58d7417" exitCode=0 Jan 05 23:35:21 crc kubenswrapper[4910]: I0105 23:35:21.706546 4910 generic.go:334] "Generic (PLEG): container finished" podID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerID="341785a368782f7a4ae4b670d6b18bd1735a5957dfe50035ef849aa7f2b5c1bf" exitCode=2 Jan 05 23:35:21 crc kubenswrapper[4910]: I0105 23:35:21.706555 4910 generic.go:334] "Generic (PLEG): container finished" podID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerID="db853bf3b3c0441fde93e0dfb51ade8de1d1bbb66119c67220f6ffa824e6e5cb" exitCode=0 Jan 05 23:35:21 crc kubenswrapper[4910]: I0105 23:35:21.706328 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b448d106-d07f-4d39-ba3f-3b8904ef7baf","Type":"ContainerDied","Data":"bf744520b2a5567cdbf53e2874c9fa07164e3fec9ac16b4055044431b58d7417"} Jan 05 23:35:21 crc kubenswrapper[4910]: I0105 23:35:21.706590 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b448d106-d07f-4d39-ba3f-3b8904ef7baf","Type":"ContainerDied","Data":"341785a368782f7a4ae4b670d6b18bd1735a5957dfe50035ef849aa7f2b5c1bf"} Jan 05 23:35:21 crc kubenswrapper[4910]: I0105 23:35:21.706600 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b448d106-d07f-4d39-ba3f-3b8904ef7baf","Type":"ContainerDied","Data":"db853bf3b3c0441fde93e0dfb51ade8de1d1bbb66119c67220f6ffa824e6e5cb"} Jan 05 23:35:21 crc kubenswrapper[4910]: I0105 23:35:21.722184 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:35:21 crc kubenswrapper[4910]: E0105 23:35:21.722497 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:35:22 crc kubenswrapper[4910]: I0105 23:35:22.733113 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2","Type":"ContainerStarted","Data":"a06d7aa7ff45744fbacaa20e3e1cfadccf53922806ecfcf8c887b0b3ae60ad0c"} Jan 05 23:35:23 crc kubenswrapper[4910]: I0105 23:35:23.739158 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/aodh-0" event={"ID":"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2","Type":"ContainerStarted","Data":"ba9c979d3196f00a5d2015b83cc31d1d57eda3402d25c9d4695683cd53affbb2"} Jan 05 23:35:25 crc kubenswrapper[4910]: I0105 23:35:25.772108 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"6de8dea8-3e8e-4b14-8b24-51ce2a7952b2","Type":"ContainerStarted","Data":"830e0eb310f5a0549b30c67f7334f75907f7b3036db3814b0e1419fa56c6ca76"} Jan 05 23:35:25 crc kubenswrapper[4910]: I0105 23:35:25.776723 4910 generic.go:334] "Generic (PLEG): container finished" podID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerID="ba737a9e701e201433ff1810e7f1e1a74e1d07465ae371169f375befac870a92" exitCode=0 Jan 05 23:35:25 crc kubenswrapper[4910]: I0105 23:35:25.776772 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b448d106-d07f-4d39-ba3f-3b8904ef7baf","Type":"ContainerDied","Data":"ba737a9e701e201433ff1810e7f1e1a74e1d07465ae371169f375befac870a92"} Jan 05 23:35:25 crc kubenswrapper[4910]: I0105 23:35:25.817153 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.101513468 podStartE2EDuration="7.817132587s" podCreationTimestamp="2026-01-05 23:35:18 +0000 UTC" firstStartedPulling="2026-01-05 23:35:19.720942438 +0000 UTC m=+6251.298440108" lastFinishedPulling="2026-01-05 23:35:25.436561557 +0000 UTC m=+6257.014059227" observedRunningTime="2026-01-05 23:35:25.797066848 +0000 UTC m=+6257.374564528" watchObservedRunningTime="2026-01-05 23:35:25.817132587 +0000 UTC m=+6257.394630257" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.069225 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.157078 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b448d106-d07f-4d39-ba3f-3b8904ef7baf-log-httpd\") pod \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.157159 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-config-data\") pod \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.157225 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-combined-ca-bundle\") pod \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.157357 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b448d106-d07f-4d39-ba3f-3b8904ef7baf-run-httpd\") pod \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.157384 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdpsx\" (UniqueName: \"kubernetes.io/projected/b448d106-d07f-4d39-ba3f-3b8904ef7baf-kube-api-access-bdpsx\") pod \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\" (UID: 
\"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.157403 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-scripts\") pod \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.157489 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-sg-core-conf-yaml\") pod \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\" (UID: \"b448d106-d07f-4d39-ba3f-3b8904ef7baf\") " Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.158715 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b448d106-d07f-4d39-ba3f-3b8904ef7baf-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b448d106-d07f-4d39-ba3f-3b8904ef7baf" (UID: "b448d106-d07f-4d39-ba3f-3b8904ef7baf"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.158750 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b448d106-d07f-4d39-ba3f-3b8904ef7baf-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b448d106-d07f-4d39-ba3f-3b8904ef7baf" (UID: "b448d106-d07f-4d39-ba3f-3b8904ef7baf"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.163188 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-scripts" (OuterVolumeSpecName: "scripts") pod "b448d106-d07f-4d39-ba3f-3b8904ef7baf" (UID: "b448d106-d07f-4d39-ba3f-3b8904ef7baf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.163274 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b448d106-d07f-4d39-ba3f-3b8904ef7baf-kube-api-access-bdpsx" (OuterVolumeSpecName: "kube-api-access-bdpsx") pod "b448d106-d07f-4d39-ba3f-3b8904ef7baf" (UID: "b448d106-d07f-4d39-ba3f-3b8904ef7baf"). InnerVolumeSpecName "kube-api-access-bdpsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.197252 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b448d106-d07f-4d39-ba3f-3b8904ef7baf" (UID: "b448d106-d07f-4d39-ba3f-3b8904ef7baf"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.260181 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.260460 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b448d106-d07f-4d39-ba3f-3b8904ef7baf-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.260469 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b448d106-d07f-4d39-ba3f-3b8904ef7baf-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.260482 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdpsx\" (UniqueName: \"kubernetes.io/projected/b448d106-d07f-4d39-ba3f-3b8904ef7baf-kube-api-access-bdpsx\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.260491 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.262995 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-config-data" (OuterVolumeSpecName: "config-data") pod "b448d106-d07f-4d39-ba3f-3b8904ef7baf" (UID: "b448d106-d07f-4d39-ba3f-3b8904ef7baf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.267324 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b448d106-d07f-4d39-ba3f-3b8904ef7baf" (UID: "b448d106-d07f-4d39-ba3f-3b8904ef7baf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.362971 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.363024 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b448d106-d07f-4d39-ba3f-3b8904ef7baf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.792660 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.793320 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b448d106-d07f-4d39-ba3f-3b8904ef7baf","Type":"ContainerDied","Data":"0bcdcb0ea3f70c9a4f5d2731001de4a0d7eecb7eec67f6ed757b44e94ba62312"} Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.793387 4910 scope.go:117] "RemoveContainer" containerID="bf744520b2a5567cdbf53e2874c9fa07164e3fec9ac16b4055044431b58d7417" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.838529 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.850841 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.851683 4910 scope.go:117] "RemoveContainer" containerID="341785a368782f7a4ae4b670d6b18bd1735a5957dfe50035ef849aa7f2b5c1bf" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.882062 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:35:26 crc kubenswrapper[4910]: E0105 23:35:26.882622 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="ceilometer-central-agent" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.882644 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="ceilometer-central-agent" Jan 05 23:35:26 crc kubenswrapper[4910]: E0105 23:35:26.882667 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="ceilometer-notification-agent" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.882675 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="ceilometer-notification-agent" Jan 05 23:35:26 crc kubenswrapper[4910]: E0105 23:35:26.882703 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="proxy-httpd" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.882713 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="proxy-httpd" Jan 05 23:35:26 crc kubenswrapper[4910]: E0105 23:35:26.882730 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="sg-core" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.882738 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="sg-core" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.883002 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="proxy-httpd" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.883017 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="ceilometer-central-agent" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.883030 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="ceilometer-notification-agent" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.883046 4910 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" containerName="sg-core" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.886642 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.886654 4910 scope.go:117] "RemoveContainer" containerID="ba737a9e701e201433ff1810e7f1e1a74e1d07465ae371169f375befac870a92" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.888808 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.889075 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.895960 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.926202 4910 scope.go:117] "RemoveContainer" containerID="db853bf3b3c0441fde93e0dfb51ade8de1d1bbb66119c67220f6ffa824e6e5cb" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.987582 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7d9105dc-6783-4325-87e9-6f6cc9389320-run-httpd\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.987774 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4f77\" (UniqueName: \"kubernetes.io/projected/7d9105dc-6783-4325-87e9-6f6cc9389320-kube-api-access-s4f77\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.987840 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7d9105dc-6783-4325-87e9-6f6cc9389320-log-httpd\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.987869 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.987956 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.988057 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-config-data\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:26 crc kubenswrapper[4910]: I0105 23:35:26.988152 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-scripts\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.090315 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7d9105dc-6783-4325-87e9-6f6cc9389320-log-httpd\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.090357 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.090398 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.090480 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-config-data\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.090548 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-scripts\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.090578 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7d9105dc-6783-4325-87e9-6f6cc9389320-run-httpd\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.090667 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4f77\" (UniqueName: \"kubernetes.io/projected/7d9105dc-6783-4325-87e9-6f6cc9389320-kube-api-access-s4f77\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.091342 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7d9105dc-6783-4325-87e9-6f6cc9389320-log-httpd\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.091588 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7d9105dc-6783-4325-87e9-6f6cc9389320-run-httpd\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.096646 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-scripts\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.096807 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.098987 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-config-data\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.111439 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.111569 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4f77\" (UniqueName: \"kubernetes.io/projected/7d9105dc-6783-4325-87e9-6f6cc9389320-kube-api-access-s4f77\") pod \"ceilometer-0\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.213734 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 23:35:27 crc kubenswrapper[4910]: W0105 23:35:27.727941 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d9105dc_6783_4325_87e9_6f6cc9389320.slice/crio-3d638b03aeb077804c037203878f165051ee7356c61ee7fd3c64a8a87429c19d WatchSource:0}: Error finding container 3d638b03aeb077804c037203878f165051ee7356c61ee7fd3c64a8a87429c19d: Status 404 returned error can't find the container with id 3d638b03aeb077804c037203878f165051ee7356c61ee7fd3c64a8a87429c19d Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.731062 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:35:27 crc kubenswrapper[4910]: I0105 23:35:27.807888 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7d9105dc-6783-4325-87e9-6f6cc9389320","Type":"ContainerStarted","Data":"3d638b03aeb077804c037203878f165051ee7356c61ee7fd3c64a8a87429c19d"} Jan 05 23:35:28 crc kubenswrapper[4910]: I0105 23:35:28.736607 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b448d106-d07f-4d39-ba3f-3b8904ef7baf" path="/var/lib/kubelet/pods/b448d106-d07f-4d39-ba3f-3b8904ef7baf/volumes" Jan 05 23:35:28 crc kubenswrapper[4910]: I0105 23:35:28.818309 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7d9105dc-6783-4325-87e9-6f6cc9389320","Type":"ContainerStarted","Data":"c5cf4184f71b27c0c31761d57e63cb096a483d57d1127c333686f93eeba5ab56"} Jan 05 23:35:29 crc kubenswrapper[4910]: I0105 23:35:29.836190 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"7d9105dc-6783-4325-87e9-6f6cc9389320","Type":"ContainerStarted","Data":"97a7ebad088f6af984b0e6fb9aeb645cbdd13d94e95a57914af3a7a6d7cbb8ad"} Jan 05 23:35:30 crc kubenswrapper[4910]: I0105 23:35:30.848551 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7d9105dc-6783-4325-87e9-6f6cc9389320","Type":"ContainerStarted","Data":"a3f4e83aed3b4c26a785d83579d5ab445ed3a45aebb26a6911213620b13bd26b"} Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.638169 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-1129-account-create-update-qbxlb"] Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.641077 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-1129-account-create-update-qbxlb" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.645542 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.657528 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-n4xcf"] Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.659071 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-n4xcf" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.675053 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-1129-account-create-update-qbxlb"] Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.690187 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-n4xcf"] Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.803416 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmwd2\" (UniqueName: \"kubernetes.io/projected/b2c2edf2-0678-437b-aa0a-1b5448266d93-kube-api-access-fmwd2\") pod \"manila-1129-account-create-update-qbxlb\" (UID: \"b2c2edf2-0678-437b-aa0a-1b5448266d93\") " pod="openstack/manila-1129-account-create-update-qbxlb" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.803475 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6nhb\" (UniqueName: \"kubernetes.io/projected/93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1-kube-api-access-m6nhb\") pod \"manila-db-create-n4xcf\" (UID: \"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1\") " pod="openstack/manila-db-create-n4xcf" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.803510 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1-operator-scripts\") pod \"manila-db-create-n4xcf\" (UID: \"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1\") " pod="openstack/manila-db-create-n4xcf" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.803559 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c2edf2-0678-437b-aa0a-1b5448266d93-operator-scripts\") pod \"manila-1129-account-create-update-qbxlb\" (UID: \"b2c2edf2-0678-437b-aa0a-1b5448266d93\") " pod="openstack/manila-1129-account-create-update-qbxlb" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.860332 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"7d9105dc-6783-4325-87e9-6f6cc9389320","Type":"ContainerStarted","Data":"c5d8a0fb68e2878cb8b8ca86d73e070b93a474ac96eb7790ea132e94f178ab9c"} Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.862365 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.892275 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.686537742 podStartE2EDuration="5.892241651s" podCreationTimestamp="2026-01-05 23:35:26 +0000 UTC" firstStartedPulling="2026-01-05 23:35:27.730211137 +0000 UTC m=+6259.307708807" lastFinishedPulling="2026-01-05 23:35:30.935915026 +0000 UTC m=+6262.513412716" observedRunningTime="2026-01-05 23:35:31.88756093 +0000 UTC m=+6263.465058620" watchObservedRunningTime="2026-01-05 23:35:31.892241651 +0000 UTC m=+6263.469739351" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.906322 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmwd2\" (UniqueName: \"kubernetes.io/projected/b2c2edf2-0678-437b-aa0a-1b5448266d93-kube-api-access-fmwd2\") pod \"manila-1129-account-create-update-qbxlb\" (UID: \"b2c2edf2-0678-437b-aa0a-1b5448266d93\") " pod="openstack/manila-1129-account-create-update-qbxlb" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.906399 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6nhb\" (UniqueName: \"kubernetes.io/projected/93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1-kube-api-access-m6nhb\") pod \"manila-db-create-n4xcf\" (UID: \"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1\") " pod="openstack/manila-db-create-n4xcf" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.906467 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1-operator-scripts\") pod \"manila-db-create-n4xcf\" (UID: \"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1\") " pod="openstack/manila-db-create-n4xcf" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.906570 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c2edf2-0678-437b-aa0a-1b5448266d93-operator-scripts\") pod \"manila-1129-account-create-update-qbxlb\" (UID: \"b2c2edf2-0678-437b-aa0a-1b5448266d93\") " pod="openstack/manila-1129-account-create-update-qbxlb" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.909355 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1-operator-scripts\") pod \"manila-db-create-n4xcf\" (UID: \"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1\") " pod="openstack/manila-db-create-n4xcf" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.910244 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c2edf2-0678-437b-aa0a-1b5448266d93-operator-scripts\") pod \"manila-1129-account-create-update-qbxlb\" (UID: \"b2c2edf2-0678-437b-aa0a-1b5448266d93\") " pod="openstack/manila-1129-account-create-update-qbxlb" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.929660 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6nhb\" (UniqueName: 
\"kubernetes.io/projected/93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1-kube-api-access-m6nhb\") pod \"manila-db-create-n4xcf\" (UID: \"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1\") " pod="openstack/manila-db-create-n4xcf" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.937914 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmwd2\" (UniqueName: \"kubernetes.io/projected/b2c2edf2-0678-437b-aa0a-1b5448266d93-kube-api-access-fmwd2\") pod \"manila-1129-account-create-update-qbxlb\" (UID: \"b2c2edf2-0678-437b-aa0a-1b5448266d93\") " pod="openstack/manila-1129-account-create-update-qbxlb" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.977832 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-1129-account-create-update-qbxlb" Jan 05 23:35:31 crc kubenswrapper[4910]: I0105 23:35:31.985890 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-n4xcf" Jan 05 23:35:32 crc kubenswrapper[4910]: I0105 23:35:32.528112 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-n4xcf"] Jan 05 23:35:32 crc kubenswrapper[4910]: I0105 23:35:32.573361 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-1129-account-create-update-qbxlb"] Jan 05 23:35:32 crc kubenswrapper[4910]: I0105 23:35:32.870603 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-n4xcf" event={"ID":"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1","Type":"ContainerStarted","Data":"cb864465201e2009fa8c3174c8883e161ca5c4de613f1b0ae6e9d483467c3167"} Jan 05 23:35:32 crc kubenswrapper[4910]: I0105 23:35:32.870653 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-n4xcf" event={"ID":"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1","Type":"ContainerStarted","Data":"b2d2b968a1d56f6d5642972dd80f2a9377392779ceccf4a9bf904e3b28127f4c"} Jan 05 23:35:32 crc kubenswrapper[4910]: I0105 23:35:32.873061 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-1129-account-create-update-qbxlb" event={"ID":"b2c2edf2-0678-437b-aa0a-1b5448266d93","Type":"ContainerStarted","Data":"592bd69773bc19043084f89888fd6432d729cf8a489de2e5d99149db08a9fd59"} Jan 05 23:35:32 crc kubenswrapper[4910]: I0105 23:35:32.873095 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-1129-account-create-update-qbxlb" event={"ID":"b2c2edf2-0678-437b-aa0a-1b5448266d93","Type":"ContainerStarted","Data":"02bbfc6a1af5cca03a74e041b892e010f6737c33baa88c124f2a87bbbd670e21"} Jan 05 23:35:32 crc kubenswrapper[4910]: I0105 23:35:32.896213 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-create-n4xcf" podStartSLOduration=1.896194443 podStartE2EDuration="1.896194443s" podCreationTimestamp="2026-01-05 23:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:35:32.890298552 +0000 UTC m=+6264.467796212" watchObservedRunningTime="2026-01-05 23:35:32.896194443 +0000 UTC m=+6264.473692113" Jan 05 23:35:33 crc kubenswrapper[4910]: I0105 23:35:33.882465 4910 generic.go:334] "Generic (PLEG): container finished" podID="b2c2edf2-0678-437b-aa0a-1b5448266d93" containerID="592bd69773bc19043084f89888fd6432d729cf8a489de2e5d99149db08a9fd59" exitCode=0 Jan 05 23:35:33 crc kubenswrapper[4910]: I0105 23:35:33.882877 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/manila-1129-account-create-update-qbxlb" event={"ID":"b2c2edf2-0678-437b-aa0a-1b5448266d93","Type":"ContainerDied","Data":"592bd69773bc19043084f89888fd6432d729cf8a489de2e5d99149db08a9fd59"} Jan 05 23:35:33 crc kubenswrapper[4910]: I0105 23:35:33.884620 4910 generic.go:334] "Generic (PLEG): container finished" podID="93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1" containerID="cb864465201e2009fa8c3174c8883e161ca5c4de613f1b0ae6e9d483467c3167" exitCode=0 Jan 05 23:35:33 crc kubenswrapper[4910]: I0105 23:35:33.885727 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-n4xcf" event={"ID":"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1","Type":"ContainerDied","Data":"cb864465201e2009fa8c3174c8883e161ca5c4de613f1b0ae6e9d483467c3167"} Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.464813 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-1129-account-create-update-qbxlb" Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.471575 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-n4xcf" Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.615238 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6nhb\" (UniqueName: \"kubernetes.io/projected/93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1-kube-api-access-m6nhb\") pod \"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1\" (UID: \"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1\") " Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.615420 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c2edf2-0678-437b-aa0a-1b5448266d93-operator-scripts\") pod \"b2c2edf2-0678-437b-aa0a-1b5448266d93\" (UID: \"b2c2edf2-0678-437b-aa0a-1b5448266d93\") " Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.615491 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1-operator-scripts\") pod \"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1\" (UID: \"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1\") " Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.616173 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmwd2\" (UniqueName: \"kubernetes.io/projected/b2c2edf2-0678-437b-aa0a-1b5448266d93-kube-api-access-fmwd2\") pod \"b2c2edf2-0678-437b-aa0a-1b5448266d93\" (UID: \"b2c2edf2-0678-437b-aa0a-1b5448266d93\") " Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.616407 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1" (UID: "93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.616439 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2c2edf2-0678-437b-aa0a-1b5448266d93-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b2c2edf2-0678-437b-aa0a-1b5448266d93" (UID: "b2c2edf2-0678-437b-aa0a-1b5448266d93"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.618262 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c2edf2-0678-437b-aa0a-1b5448266d93-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.618315 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.622003 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1-kube-api-access-m6nhb" (OuterVolumeSpecName: "kube-api-access-m6nhb") pod "93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1" (UID: "93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1"). InnerVolumeSpecName "kube-api-access-m6nhb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.622515 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2c2edf2-0678-437b-aa0a-1b5448266d93-kube-api-access-fmwd2" (OuterVolumeSpecName: "kube-api-access-fmwd2") pod "b2c2edf2-0678-437b-aa0a-1b5448266d93" (UID: "b2c2edf2-0678-437b-aa0a-1b5448266d93"). InnerVolumeSpecName "kube-api-access-fmwd2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.721012 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmwd2\" (UniqueName: \"kubernetes.io/projected/b2c2edf2-0678-437b-aa0a-1b5448266d93-kube-api-access-fmwd2\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.721084 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6nhb\" (UniqueName: \"kubernetes.io/projected/93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1-kube-api-access-m6nhb\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.921843 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-1129-account-create-update-qbxlb" event={"ID":"b2c2edf2-0678-437b-aa0a-1b5448266d93","Type":"ContainerDied","Data":"02bbfc6a1af5cca03a74e041b892e010f6737c33baa88c124f2a87bbbd670e21"} Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.922428 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02bbfc6a1af5cca03a74e041b892e010f6737c33baa88c124f2a87bbbd670e21" Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.922423 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-1129-account-create-update-qbxlb" Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.930545 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-n4xcf" event={"ID":"93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1","Type":"ContainerDied","Data":"b2d2b968a1d56f6d5642972dd80f2a9377392779ceccf4a9bf904e3b28127f4c"} Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.930604 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2d2b968a1d56f6d5642972dd80f2a9377392779ceccf4a9bf904e3b28127f4c" Jan 05 23:35:35 crc kubenswrapper[4910]: I0105 23:35:35.930669 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-n4xcf" Jan 05 23:35:36 crc kubenswrapper[4910]: I0105 23:35:36.722399 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:35:36 crc kubenswrapper[4910]: E0105 23:35:36.722767 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:35:36 crc kubenswrapper[4910]: I0105 23:35:36.981305 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-dcfxt"] Jan 05 23:35:36 crc kubenswrapper[4910]: E0105 23:35:36.981955 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1" containerName="mariadb-database-create" Jan 05 23:35:36 crc kubenswrapper[4910]: I0105 23:35:36.981980 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1" containerName="mariadb-database-create" Jan 05 23:35:36 crc kubenswrapper[4910]: E0105 23:35:36.982012 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2c2edf2-0678-437b-aa0a-1b5448266d93" containerName="mariadb-account-create-update" Jan 05 23:35:36 crc kubenswrapper[4910]: I0105 23:35:36.982021 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2c2edf2-0678-437b-aa0a-1b5448266d93" containerName="mariadb-account-create-update" Jan 05 23:35:36 crc kubenswrapper[4910]: I0105 23:35:36.982334 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2c2edf2-0678-437b-aa0a-1b5448266d93" containerName="mariadb-account-create-update" Jan 05 23:35:36 crc kubenswrapper[4910]: I0105 23:35:36.982362 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1" containerName="mariadb-database-create" Jan 05 23:35:36 crc kubenswrapper[4910]: I0105 23:35:36.983261 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:36 crc kubenswrapper[4910]: I0105 23:35:36.986733 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-2xnng" Jan 05 23:35:36 crc kubenswrapper[4910]: I0105 23:35:36.986818 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:36.997952 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-dcfxt"] Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.052527 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-784wd\" (UniqueName: \"kubernetes.io/projected/1e41b44c-e6c9-473f-870e-52fc55ef73ff-kube-api-access-784wd\") pod \"manila-db-sync-dcfxt\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.052685 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-combined-ca-bundle\") pod \"manila-db-sync-dcfxt\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.052827 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-job-config-data\") pod \"manila-db-sync-dcfxt\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.052886 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-config-data\") pod \"manila-db-sync-dcfxt\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.154300 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-784wd\" (UniqueName: \"kubernetes.io/projected/1e41b44c-e6c9-473f-870e-52fc55ef73ff-kube-api-access-784wd\") pod \"manila-db-sync-dcfxt\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.154393 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-combined-ca-bundle\") pod \"manila-db-sync-dcfxt\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.154486 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-job-config-data\") pod \"manila-db-sync-dcfxt\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.154521 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-config-data\") pod \"manila-db-sync-dcfxt\" (UID: 
\"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.161699 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-job-config-data\") pod \"manila-db-sync-dcfxt\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.165697 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-config-data\") pod \"manila-db-sync-dcfxt\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.170004 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-784wd\" (UniqueName: \"kubernetes.io/projected/1e41b44c-e6c9-473f-870e-52fc55ef73ff-kube-api-access-784wd\") pod \"manila-db-sync-dcfxt\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.170037 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-combined-ca-bundle\") pod \"manila-db-sync-dcfxt\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.309174 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:37 crc kubenswrapper[4910]: W0105 23:35:37.983312 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e41b44c_e6c9_473f_870e_52fc55ef73ff.slice/crio-bdf4595568a7e723b09c73c81f5ca351a18631e66f0ffc1ac88367d86752901c WatchSource:0}: Error finding container bdf4595568a7e723b09c73c81f5ca351a18631e66f0ffc1ac88367d86752901c: Status 404 returned error can't find the container with id bdf4595568a7e723b09c73c81f5ca351a18631e66f0ffc1ac88367d86752901c Jan 05 23:35:37 crc kubenswrapper[4910]: I0105 23:35:37.991752 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-dcfxt"] Jan 05 23:35:38 crc kubenswrapper[4910]: I0105 23:35:38.962950 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-dcfxt" event={"ID":"1e41b44c-e6c9-473f-870e-52fc55ef73ff","Type":"ContainerStarted","Data":"bdf4595568a7e723b09c73c81f5ca351a18631e66f0ffc1ac88367d86752901c"} Jan 05 23:35:43 crc kubenswrapper[4910]: I0105 23:35:43.014759 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-dcfxt" event={"ID":"1e41b44c-e6c9-473f-870e-52fc55ef73ff","Type":"ContainerStarted","Data":"c1f36164834643d3fe8de7801f0e60d65e3721196177978c65664f8ca20969f7"} Jan 05 23:35:43 crc kubenswrapper[4910]: I0105 23:35:43.038967 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-dcfxt" podStartSLOduration=2.9180814980000003 podStartE2EDuration="7.03894349s" podCreationTimestamp="2026-01-05 23:35:36 +0000 UTC" firstStartedPulling="2026-01-05 23:35:37.986332519 +0000 UTC m=+6269.563830199" lastFinishedPulling="2026-01-05 23:35:42.107194521 +0000 UTC m=+6273.684692191" observedRunningTime="2026-01-05 23:35:43.028340477 +0000 
UTC m=+6274.605838147" watchObservedRunningTime="2026-01-05 23:35:43.03894349 +0000 UTC m=+6274.616441160" Jan 05 23:35:45 crc kubenswrapper[4910]: I0105 23:35:45.042648 4910 generic.go:334] "Generic (PLEG): container finished" podID="1e41b44c-e6c9-473f-870e-52fc55ef73ff" containerID="c1f36164834643d3fe8de7801f0e60d65e3721196177978c65664f8ca20969f7" exitCode=0 Jan 05 23:35:45 crc kubenswrapper[4910]: I0105 23:35:45.042757 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-dcfxt" event={"ID":"1e41b44c-e6c9-473f-870e-52fc55ef73ff","Type":"ContainerDied","Data":"c1f36164834643d3fe8de7801f0e60d65e3721196177978c65664f8ca20969f7"} Jan 05 23:35:46 crc kubenswrapper[4910]: I0105 23:35:46.621351 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:46 crc kubenswrapper[4910]: I0105 23:35:46.712898 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-job-config-data\") pod \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " Jan 05 23:35:46 crc kubenswrapper[4910]: I0105 23:35:46.712974 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-784wd\" (UniqueName: \"kubernetes.io/projected/1e41b44c-e6c9-473f-870e-52fc55ef73ff-kube-api-access-784wd\") pod \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " Jan 05 23:35:46 crc kubenswrapper[4910]: I0105 23:35:46.713226 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-combined-ca-bundle\") pod \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " Jan 05 23:35:46 crc kubenswrapper[4910]: I0105 23:35:46.713329 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-config-data\") pod \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\" (UID: \"1e41b44c-e6c9-473f-870e-52fc55ef73ff\") " Jan 05 23:35:46 crc kubenswrapper[4910]: I0105 23:35:46.722482 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "1e41b44c-e6c9-473f-870e-52fc55ef73ff" (UID: "1e41b44c-e6c9-473f-870e-52fc55ef73ff"). InnerVolumeSpecName "job-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:35:46 crc kubenswrapper[4910]: I0105 23:35:46.730381 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e41b44c-e6c9-473f-870e-52fc55ef73ff-kube-api-access-784wd" (OuterVolumeSpecName: "kube-api-access-784wd") pod "1e41b44c-e6c9-473f-870e-52fc55ef73ff" (UID: "1e41b44c-e6c9-473f-870e-52fc55ef73ff"). InnerVolumeSpecName "kube-api-access-784wd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:35:46 crc kubenswrapper[4910]: I0105 23:35:46.730375 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-config-data" (OuterVolumeSpecName: "config-data") pod "1e41b44c-e6c9-473f-870e-52fc55ef73ff" (UID: "1e41b44c-e6c9-473f-870e-52fc55ef73ff"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:35:46 crc kubenswrapper[4910]: I0105 23:35:46.770709 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e41b44c-e6c9-473f-870e-52fc55ef73ff" (UID: "1e41b44c-e6c9-473f-870e-52fc55ef73ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:35:46 crc kubenswrapper[4910]: I0105 23:35:46.816038 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:46 crc kubenswrapper[4910]: I0105 23:35:46.816080 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:46 crc kubenswrapper[4910]: I0105 23:35:46.816091 4910 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/1e41b44c-e6c9-473f-870e-52fc55ef73ff-job-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:46 crc kubenswrapper[4910]: I0105 23:35:46.816100 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-784wd\" (UniqueName: \"kubernetes.io/projected/1e41b44c-e6c9-473f-870e-52fc55ef73ff-kube-api-access-784wd\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.063148 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-dcfxt" event={"ID":"1e41b44c-e6c9-473f-870e-52fc55ef73ff","Type":"ContainerDied","Data":"bdf4595568a7e723b09c73c81f5ca351a18631e66f0ffc1ac88367d86752901c"} Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.063213 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bdf4595568a7e723b09c73c81f5ca351a18631e66f0ffc1ac88367d86752901c" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.063171 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-dcfxt" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.436339 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Jan 05 23:35:47 crc kubenswrapper[4910]: E0105 23:35:47.437077 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e41b44c-e6c9-473f-870e-52fc55ef73ff" containerName="manila-db-sync" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.437103 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e41b44c-e6c9-473f-870e-52fc55ef73ff" containerName="manila-db-sync" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.437349 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e41b44c-e6c9-473f-870e-52fc55ef73ff" containerName="manila-db-sync" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.438792 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.442132 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.445951 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.446027 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.446235 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-2xnng" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.467246 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.531448 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16a933d3-e82f-4dc3-bc84-d961be9aacb2-config-data\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.531514 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/16a933d3-e82f-4dc3-bc84-d961be9aacb2-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.531537 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16a933d3-e82f-4dc3-bc84-d961be9aacb2-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.531608 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rclx\" (UniqueName: \"kubernetes.io/projected/16a933d3-e82f-4dc3-bc84-d961be9aacb2-kube-api-access-8rclx\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.531652 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16a933d3-e82f-4dc3-bc84-d961be9aacb2-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.531761 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16a933d3-e82f-4dc3-bc84-d961be9aacb2-scripts\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.598518 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.602502 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.610635 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.649370 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/140e458d-aa1e-4579-a47e-1b306153da30-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.649505 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rclx\" (UniqueName: \"kubernetes.io/projected/16a933d3-e82f-4dc3-bc84-d961be9aacb2-kube-api-access-8rclx\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.649567 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16a933d3-e82f-4dc3-bc84-d961be9aacb2-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.649626 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/140e458d-aa1e-4579-a47e-1b306153da30-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.649783 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/140e458d-aa1e-4579-a47e-1b306153da30-config-data\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.649852 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/140e458d-aa1e-4579-a47e-1b306153da30-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.649919 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckb9j\" (UniqueName: \"kubernetes.io/projected/140e458d-aa1e-4579-a47e-1b306153da30-kube-api-access-ckb9j\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.649956 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/140e458d-aa1e-4579-a47e-1b306153da30-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.650016 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/16a933d3-e82f-4dc3-bc84-d961be9aacb2-scripts\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.654737 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/140e458d-aa1e-4579-a47e-1b306153da30-scripts\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.654933 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/140e458d-aa1e-4579-a47e-1b306153da30-ceph\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.655049 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16a933d3-e82f-4dc3-bc84-d961be9aacb2-config-data\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.655093 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/16a933d3-e82f-4dc3-bc84-d961be9aacb2-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.655113 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16a933d3-e82f-4dc3-bc84-d961be9aacb2-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.655588 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/16a933d3-e82f-4dc3-bc84-d961be9aacb2-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.660264 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.666530 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16a933d3-e82f-4dc3-bc84-d961be9aacb2-config-data\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.669495 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16a933d3-e82f-4dc3-bc84-d961be9aacb2-scripts\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.672055 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16a933d3-e82f-4dc3-bc84-d961be9aacb2-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: 
\"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.675728 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rclx\" (UniqueName: \"kubernetes.io/projected/16a933d3-e82f-4dc3-bc84-d961be9aacb2-kube-api-access-8rclx\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.679415 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16a933d3-e82f-4dc3-bc84-d961be9aacb2-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"16a933d3-e82f-4dc3-bc84-d961be9aacb2\") " pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.709049 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-64977c6f6c-bmckg"] Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.711543 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.724860 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64977c6f6c-bmckg"] Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.757744 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.759548 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-ovsdbserver-sb\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.759604 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-config\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.759644 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/140e458d-aa1e-4579-a47e-1b306153da30-scripts\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.759674 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-dns-svc\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.759715 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/140e458d-aa1e-4579-a47e-1b306153da30-ceph\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.759730 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-zd4p2\" (UniqueName: \"kubernetes.io/projected/395236e1-a608-4655-b98a-80166634b17a-kube-api-access-zd4p2\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.759782 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-ovsdbserver-nb\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.759817 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/140e458d-aa1e-4579-a47e-1b306153da30-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.759864 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/140e458d-aa1e-4579-a47e-1b306153da30-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.759902 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/140e458d-aa1e-4579-a47e-1b306153da30-config-data\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.759922 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/140e458d-aa1e-4579-a47e-1b306153da30-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.759946 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckb9j\" (UniqueName: \"kubernetes.io/projected/140e458d-aa1e-4579-a47e-1b306153da30-kube-api-access-ckb9j\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.759967 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/140e458d-aa1e-4579-a47e-1b306153da30-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.760838 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.761293 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.764040 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/140e458d-aa1e-4579-a47e-1b306153da30-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.764299 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/140e458d-aa1e-4579-a47e-1b306153da30-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.764480 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/140e458d-aa1e-4579-a47e-1b306153da30-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.770409 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.772882 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/140e458d-aa1e-4579-a47e-1b306153da30-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.773502 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/140e458d-aa1e-4579-a47e-1b306153da30-scripts\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.773976 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/140e458d-aa1e-4579-a47e-1b306153da30-ceph\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.785715 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.803142 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/140e458d-aa1e-4579-a47e-1b306153da30-config-data\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.803387 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckb9j\" (UniqueName: \"kubernetes.io/projected/140e458d-aa1e-4579-a47e-1b306153da30-kube-api-access-ckb9j\") pod \"manila-share-share1-0\" (UID: \"140e458d-aa1e-4579-a47e-1b306153da30\") " pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.862377 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zd4p2\" (UniqueName: 
\"kubernetes.io/projected/395236e1-a608-4655-b98a-80166634b17a-kube-api-access-zd4p2\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.862599 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mj8nw\" (UniqueName: \"kubernetes.io/projected/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-kube-api-access-mj8nw\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.862650 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-ovsdbserver-nb\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.862667 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.863175 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-etc-machine-id\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.863273 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-config-data\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.863470 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-ovsdbserver-sb\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.863528 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-config-data-custom\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.863550 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-scripts\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.863578 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-config\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: 
\"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.863640 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-logs\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.863709 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-dns-svc\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.864305 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-ovsdbserver-nb\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.864498 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-dns-svc\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.864936 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-ovsdbserver-sb\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.866084 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-config\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.894922 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zd4p2\" (UniqueName: \"kubernetes.io/projected/395236e1-a608-4655-b98a-80166634b17a-kube-api-access-zd4p2\") pod \"dnsmasq-dns-64977c6f6c-bmckg\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.952094 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.967666 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-config-data-custom\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.967895 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-scripts\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.967933 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-logs\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.967997 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mj8nw\" (UniqueName: \"kubernetes.io/projected/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-kube-api-access-mj8nw\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.968051 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.968130 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-etc-machine-id\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.968162 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-config-data\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.971878 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-etc-machine-id\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.972299 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-logs\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.972590 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-config-data-custom\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " 
pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.973081 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-config-data\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.976784 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:47 crc kubenswrapper[4910]: I0105 23:35:47.979602 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-scripts\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:48 crc kubenswrapper[4910]: I0105 23:35:47.998567 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mj8nw\" (UniqueName: \"kubernetes.io/projected/ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c-kube-api-access-mj8nw\") pod \"manila-api-0\" (UID: \"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c\") " pod="openstack/manila-api-0" Jan 05 23:35:48 crc kubenswrapper[4910]: I0105 23:35:48.154551 4910 scope.go:117] "RemoveContainer" containerID="e9f8eb2fd303dcdb126d1a8ddb65a5e65f8315f105dbb04c373ae3297e2f2c5a" Jan 05 23:35:48 crc kubenswrapper[4910]: I0105 23:35:48.169340 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:48 crc kubenswrapper[4910]: I0105 23:35:48.205968 4910 scope.go:117] "RemoveContainer" containerID="2aa6c17139921a32e4cdb0f6f55e90ee7af51529167e92a576aecb257cf02847" Jan 05 23:35:48 crc kubenswrapper[4910]: I0105 23:35:48.243081 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Jan 05 23:35:48 crc kubenswrapper[4910]: I0105 23:35:48.266831 4910 scope.go:117] "RemoveContainer" containerID="ded6a8464e4069c2bb30e9c5db3f6707e373fd87a2528b9dfa68c70a0e3df5be" Jan 05 23:35:48 crc kubenswrapper[4910]: I0105 23:35:48.401189 4910 scope.go:117] "RemoveContainer" containerID="233d33d95be1fca8d90b8a8b08a404f037302322afe421ab5f7da382d3d8ae09" Jan 05 23:35:48 crc kubenswrapper[4910]: I0105 23:35:48.439622 4910 scope.go:117] "RemoveContainer" containerID="1f6e25575c530d8344e8c2a4b8060e2ca007f1d88ef98d44f63e4c14c871dcad" Jan 05 23:35:48 crc kubenswrapper[4910]: I0105 23:35:48.447268 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Jan 05 23:35:48 crc kubenswrapper[4910]: I0105 23:35:48.747472 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Jan 05 23:35:48 crc kubenswrapper[4910]: I0105 23:35:48.855194 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64977c6f6c-bmckg"] Jan 05 23:35:49 crc kubenswrapper[4910]: I0105 23:35:49.136383 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"140e458d-aa1e-4579-a47e-1b306153da30","Type":"ContainerStarted","Data":"e6975c3d12e1212230366e6dd4cf2c8846b5d6ccda3ed88253978d8ef7e37062"} Jan 05 23:35:49 crc kubenswrapper[4910]: W0105 23:35:49.140227 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab13a3ad_f39a_4ef5_8861_e59c1c7bb17c.slice/crio-e2e59dd459da7f9ad7c3d718becf3943d4f6b51797afcba8083238881d3c150c WatchSource:0}: Error finding container e2e59dd459da7f9ad7c3d718becf3943d4f6b51797afcba8083238881d3c150c: Status 404 returned error can't find the container with id e2e59dd459da7f9ad7c3d718becf3943d4f6b51797afcba8083238881d3c150c Jan 05 23:35:49 crc kubenswrapper[4910]: I0105 23:35:49.144280 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"16a933d3-e82f-4dc3-bc84-d961be9aacb2","Type":"ContainerStarted","Data":"77e7b3465d47292ca57028d9308b862ad6f092e13305ab999ed09c0369b46ff5"} Jan 05 23:35:49 crc kubenswrapper[4910]: I0105 23:35:49.147919 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Jan 05 23:35:49 crc kubenswrapper[4910]: I0105 23:35:49.151508 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" event={"ID":"395236e1-a608-4655-b98a-80166634b17a","Type":"ContainerStarted","Data":"967a557d1e44cf2b5026bac3638ff46b1a7bceb59f8f0064abff041785280f6b"} Jan 05 23:35:49 crc kubenswrapper[4910]: I0105 23:35:49.722812 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:35:49 crc kubenswrapper[4910]: E0105 23:35:49.723491 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:35:50 crc kubenswrapper[4910]: I0105 23:35:50.178355 4910 generic.go:334] "Generic (PLEG): container finished" podID="395236e1-a608-4655-b98a-80166634b17a" 
containerID="a767872039910c88bead140428a66d9e014645a67e3b0a3ca308abbe270ee0a6" exitCode=0 Jan 05 23:35:50 crc kubenswrapper[4910]: I0105 23:35:50.178459 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" event={"ID":"395236e1-a608-4655-b98a-80166634b17a","Type":"ContainerDied","Data":"a767872039910c88bead140428a66d9e014645a67e3b0a3ca308abbe270ee0a6"} Jan 05 23:35:50 crc kubenswrapper[4910]: I0105 23:35:50.202905 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c","Type":"ContainerStarted","Data":"91497d95cbb5fc390d91ac8972193cfe5f00c1d490a091198a519ef5f57ace81"} Jan 05 23:35:50 crc kubenswrapper[4910]: I0105 23:35:50.202952 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c","Type":"ContainerStarted","Data":"e2e59dd459da7f9ad7c3d718becf3943d4f6b51797afcba8083238881d3c150c"} Jan 05 23:35:50 crc kubenswrapper[4910]: I0105 23:35:50.213318 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"16a933d3-e82f-4dc3-bc84-d961be9aacb2","Type":"ContainerStarted","Data":"c0823bac243f34d89045218ec3d9e8ff0c69d32590f0cf9188f20158d101e2fc"} Jan 05 23:35:51 crc kubenswrapper[4910]: I0105 23:35:51.229608 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"16a933d3-e82f-4dc3-bc84-d961be9aacb2","Type":"ContainerStarted","Data":"f6374352c6b695dfd93b10cf548f2a7e9698e22b9a9344d418c144a2ddcd5618"} Jan 05 23:35:51 crc kubenswrapper[4910]: I0105 23:35:51.236475 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" event={"ID":"395236e1-a608-4655-b98a-80166634b17a","Type":"ContainerStarted","Data":"997b7bd68313c3820caae4772b4c17ad6252d7a6006895d0a6b20fd7ed7ee588"} Jan 05 23:35:51 crc kubenswrapper[4910]: I0105 23:35:51.236611 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:51 crc kubenswrapper[4910]: I0105 23:35:51.238444 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c","Type":"ContainerStarted","Data":"41798ed759c583d5757215cfb8358c518bd48d1aa7618b587a3020f2995611e0"} Jan 05 23:35:51 crc kubenswrapper[4910]: I0105 23:35:51.240155 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Jan 05 23:35:51 crc kubenswrapper[4910]: I0105 23:35:51.263219 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.520450817 podStartE2EDuration="4.263196987s" podCreationTimestamp="2026-01-05 23:35:47 +0000 UTC" firstStartedPulling="2026-01-05 23:35:48.48554359 +0000 UTC m=+6280.063041260" lastFinishedPulling="2026-01-05 23:35:49.22828976 +0000 UTC m=+6280.805787430" observedRunningTime="2026-01-05 23:35:51.254335176 +0000 UTC m=+6282.831832846" watchObservedRunningTime="2026-01-05 23:35:51.263196987 +0000 UTC m=+6282.840694657" Jan 05 23:35:51 crc kubenswrapper[4910]: I0105 23:35:51.292136 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=4.292088426 podStartE2EDuration="4.292088426s" podCreationTimestamp="2026-01-05 23:35:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 
+0000 UTC" observedRunningTime="2026-01-05 23:35:51.275188723 +0000 UTC m=+6282.852686393" watchObservedRunningTime="2026-01-05 23:35:51.292088426 +0000 UTC m=+6282.869586106" Jan 05 23:35:51 crc kubenswrapper[4910]: I0105 23:35:51.310517 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" podStartSLOduration=4.310498656 podStartE2EDuration="4.310498656s" podCreationTimestamp="2026-01-05 23:35:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:35:51.302264539 +0000 UTC m=+6282.879762209" watchObservedRunningTime="2026-01-05 23:35:51.310498656 +0000 UTC m=+6282.887996326" Jan 05 23:35:53 crc kubenswrapper[4910]: I0105 23:35:53.038108 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-85jzg"] Jan 05 23:35:53 crc kubenswrapper[4910]: I0105 23:35:53.052214 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-b44a-account-create-update-8k545"] Jan 05 23:35:53 crc kubenswrapper[4910]: I0105 23:35:53.060908 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-b44a-account-create-update-8k545"] Jan 05 23:35:53 crc kubenswrapper[4910]: I0105 23:35:53.070956 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-85jzg"] Jan 05 23:35:54 crc kubenswrapper[4910]: I0105 23:35:54.734232 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23d3522b-b3e1-43f4-91ee-3617ee3b5a15" path="/var/lib/kubelet/pods/23d3522b-b3e1-43f4-91ee-3617ee3b5a15/volumes" Jan 05 23:35:54 crc kubenswrapper[4910]: I0105 23:35:54.735438 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9225c95-afc5-491e-8b18-0cb08272e8ae" path="/var/lib/kubelet/pods/f9225c95-afc5-491e-8b18-0cb08272e8ae/volumes" Jan 05 23:35:56 crc kubenswrapper[4910]: I0105 23:35:56.399222 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"140e458d-aa1e-4579-a47e-1b306153da30","Type":"ContainerStarted","Data":"12dbbada7cc03cb7f8f875eb3888fca0ef749323f0173c95442330968ea696e4"} Jan 05 23:35:57 crc kubenswrapper[4910]: I0105 23:35:57.222179 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 05 23:35:57 crc kubenswrapper[4910]: I0105 23:35:57.409101 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"140e458d-aa1e-4579-a47e-1b306153da30","Type":"ContainerStarted","Data":"363674b252a1d31b9a8fb40f6c60ad783e3bc938c38d18d797dc8cb142a1d7a4"} Jan 05 23:35:57 crc kubenswrapper[4910]: I0105 23:35:57.433417 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.490208506 podStartE2EDuration="10.433392082s" podCreationTimestamp="2026-01-05 23:35:47 +0000 UTC" firstStartedPulling="2026-01-05 23:35:48.743391442 +0000 UTC m=+6280.320889112" lastFinishedPulling="2026-01-05 23:35:55.686574978 +0000 UTC m=+6287.264072688" observedRunningTime="2026-01-05 23:35:57.43205197 +0000 UTC m=+6289.009549640" watchObservedRunningTime="2026-01-05 23:35:57.433392082 +0000 UTC m=+6289.010889762" Jan 05 23:35:57 crc kubenswrapper[4910]: I0105 23:35:57.762616 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Jan 05 23:35:57 crc kubenswrapper[4910]: I0105 23:35:57.953146 4910 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Jan 05 23:35:58 crc kubenswrapper[4910]: I0105 23:35:58.172418 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:35:58 crc kubenswrapper[4910]: I0105 23:35:58.251610 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cb64bf69-8dbkn"] Jan 05 23:35:58 crc kubenswrapper[4910]: I0105 23:35:58.251953 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" podUID="030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" containerName="dnsmasq-dns" containerID="cri-o://2089369f33fa2bba993962ac1a23b91cc59807065c237d34e497fb3ad6218ed6" gracePeriod=10 Jan 05 23:35:58 crc kubenswrapper[4910]: I0105 23:35:58.504065 4910 generic.go:334] "Generic (PLEG): container finished" podID="030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" containerID="2089369f33fa2bba993962ac1a23b91cc59807065c237d34e497fb3ad6218ed6" exitCode=0 Jan 05 23:35:58 crc kubenswrapper[4910]: I0105 23:35:58.504827 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" event={"ID":"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0","Type":"ContainerDied","Data":"2089369f33fa2bba993962ac1a23b91cc59807065c237d34e497fb3ad6218ed6"} Jan 05 23:35:58 crc kubenswrapper[4910]: I0105 23:35:58.913839 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.092367 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-config\") pod \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.092419 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-ovsdbserver-nb\") pod \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.092483 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmrx8\" (UniqueName: \"kubernetes.io/projected/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-kube-api-access-vmrx8\") pod \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.092545 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-dns-svc\") pod \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.093477 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-ovsdbserver-sb\") pod \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\" (UID: \"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0\") " Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.097947 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-kube-api-access-vmrx8" 
(OuterVolumeSpecName: "kube-api-access-vmrx8") pod "030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" (UID: "030ee9ad-b562-41c2-b1e2-f5be9e4e13b0"). InnerVolumeSpecName "kube-api-access-vmrx8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.144827 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" (UID: "030ee9ad-b562-41c2-b1e2-f5be9e4e13b0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.147073 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" (UID: "030ee9ad-b562-41c2-b1e2-f5be9e4e13b0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.158237 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-config" (OuterVolumeSpecName: "config") pod "030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" (UID: "030ee9ad-b562-41c2-b1e2-f5be9e4e13b0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.160114 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" (UID: "030ee9ad-b562-41c2-b1e2-f5be9e4e13b0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.196306 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.196341 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.196356 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmrx8\" (UniqueName: \"kubernetes.io/projected/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-kube-api-access-vmrx8\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.196366 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.196375 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.517809 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.518022 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cb64bf69-8dbkn" event={"ID":"030ee9ad-b562-41c2-b1e2-f5be9e4e13b0","Type":"ContainerDied","Data":"0fc546a02c24ae0ffe9ff7651fd1e3eeb800dc4556aa61d46fe2662b4623ef28"} Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.518368 4910 scope.go:117] "RemoveContainer" containerID="2089369f33fa2bba993962ac1a23b91cc59807065c237d34e497fb3ad6218ed6" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.544299 4910 scope.go:117] "RemoveContainer" containerID="f387a0d309f3dbadce8b4ac31158b07dd41a8bef7625f3938029eea9c9a41889" Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.556180 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cb64bf69-8dbkn"] Jan 05 23:35:59 crc kubenswrapper[4910]: I0105 23:35:59.570332 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6cb64bf69-8dbkn"] Jan 05 23:36:00 crc kubenswrapper[4910]: I0105 23:36:00.777276 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" path="/var/lib/kubelet/pods/030ee9ad-b562-41c2-b1e2-f5be9e4e13b0/volumes" Jan 05 23:36:00 crc kubenswrapper[4910]: I0105 23:36:00.781964 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:36:00 crc kubenswrapper[4910]: I0105 23:36:00.782245 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="ceilometer-central-agent" containerID="cri-o://c5cf4184f71b27c0c31761d57e63cb096a483d57d1127c333686f93eeba5ab56" gracePeriod=30 Jan 05 23:36:00 crc kubenswrapper[4910]: I0105 23:36:00.782411 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="proxy-httpd" containerID="cri-o://c5d8a0fb68e2878cb8b8ca86d73e070b93a474ac96eb7790ea132e94f178ab9c" gracePeriod=30 Jan 05 23:36:00 crc kubenswrapper[4910]: I0105 23:36:00.782513 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="ceilometer-notification-agent" containerID="cri-o://97a7ebad088f6af984b0e6fb9aeb645cbdd13d94e95a57914af3a7a6d7cbb8ad" gracePeriod=30 Jan 05 23:36:00 crc kubenswrapper[4910]: I0105 23:36:00.782689 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="sg-core" containerID="cri-o://a3f4e83aed3b4c26a785d83579d5ab445ed3a45aebb26a6911213620b13bd26b" gracePeriod=30 Jan 05 23:36:01 crc kubenswrapper[4910]: I0105 23:36:01.047104 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-8hflm"] Jan 05 23:36:01 crc kubenswrapper[4910]: I0105 23:36:01.056215 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-8hflm"] Jan 05 23:36:01 crc kubenswrapper[4910]: I0105 23:36:01.602291 4910 generic.go:334] "Generic (PLEG): container finished" podID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerID="c5d8a0fb68e2878cb8b8ca86d73e070b93a474ac96eb7790ea132e94f178ab9c" exitCode=0 Jan 05 23:36:01 crc kubenswrapper[4910]: I0105 23:36:01.602548 4910 generic.go:334] "Generic (PLEG): container finished" 
podID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerID="a3f4e83aed3b4c26a785d83579d5ab445ed3a45aebb26a6911213620b13bd26b" exitCode=2 Jan 05 23:36:01 crc kubenswrapper[4910]: I0105 23:36:01.602371 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7d9105dc-6783-4325-87e9-6f6cc9389320","Type":"ContainerDied","Data":"c5d8a0fb68e2878cb8b8ca86d73e070b93a474ac96eb7790ea132e94f178ab9c"} Jan 05 23:36:01 crc kubenswrapper[4910]: I0105 23:36:01.602597 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7d9105dc-6783-4325-87e9-6f6cc9389320","Type":"ContainerDied","Data":"a3f4e83aed3b4c26a785d83579d5ab445ed3a45aebb26a6911213620b13bd26b"} Jan 05 23:36:01 crc kubenswrapper[4910]: I0105 23:36:01.602611 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7d9105dc-6783-4325-87e9-6f6cc9389320","Type":"ContainerDied","Data":"c5cf4184f71b27c0c31761d57e63cb096a483d57d1127c333686f93eeba5ab56"} Jan 05 23:36:01 crc kubenswrapper[4910]: I0105 23:36:01.602556 4910 generic.go:334] "Generic (PLEG): container finished" podID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerID="c5cf4184f71b27c0c31761d57e63cb096a483d57d1127c333686f93eeba5ab56" exitCode=0 Jan 05 23:36:01 crc kubenswrapper[4910]: I0105 23:36:01.722292 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:36:01 crc kubenswrapper[4910]: E0105 23:36:01.722648 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:36:02 crc kubenswrapper[4910]: I0105 23:36:02.744721 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20e5ef58-99f5-44ad-bcbd-310ab1052ce2" path="/var/lib/kubelet/pods/20e5ef58-99f5-44ad-bcbd-310ab1052ce2/volumes" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.340382 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.438880 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4f77\" (UniqueName: \"kubernetes.io/projected/7d9105dc-6783-4325-87e9-6f6cc9389320-kube-api-access-s4f77\") pod \"7d9105dc-6783-4325-87e9-6f6cc9389320\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.439042 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-combined-ca-bundle\") pod \"7d9105dc-6783-4325-87e9-6f6cc9389320\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.439081 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-config-data\") pod \"7d9105dc-6783-4325-87e9-6f6cc9389320\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.439164 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7d9105dc-6783-4325-87e9-6f6cc9389320-log-httpd\") pod \"7d9105dc-6783-4325-87e9-6f6cc9389320\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.439263 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7d9105dc-6783-4325-87e9-6f6cc9389320-run-httpd\") pod \"7d9105dc-6783-4325-87e9-6f6cc9389320\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.439319 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-sg-core-conf-yaml\") pod \"7d9105dc-6783-4325-87e9-6f6cc9389320\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.439374 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-scripts\") pod \"7d9105dc-6783-4325-87e9-6f6cc9389320\" (UID: \"7d9105dc-6783-4325-87e9-6f6cc9389320\") " Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.440055 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d9105dc-6783-4325-87e9-6f6cc9389320-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7d9105dc-6783-4325-87e9-6f6cc9389320" (UID: "7d9105dc-6783-4325-87e9-6f6cc9389320"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.440204 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d9105dc-6783-4325-87e9-6f6cc9389320-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7d9105dc-6783-4325-87e9-6f6cc9389320" (UID: "7d9105dc-6783-4325-87e9-6f6cc9389320"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.448247 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d9105dc-6783-4325-87e9-6f6cc9389320-kube-api-access-s4f77" (OuterVolumeSpecName: "kube-api-access-s4f77") pod "7d9105dc-6783-4325-87e9-6f6cc9389320" (UID: "7d9105dc-6783-4325-87e9-6f6cc9389320"). InnerVolumeSpecName "kube-api-access-s4f77". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.453215 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-scripts" (OuterVolumeSpecName: "scripts") pod "7d9105dc-6783-4325-87e9-6f6cc9389320" (UID: "7d9105dc-6783-4325-87e9-6f6cc9389320"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.483758 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7d9105dc-6783-4325-87e9-6f6cc9389320" (UID: "7d9105dc-6783-4325-87e9-6f6cc9389320"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.523047 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7d9105dc-6783-4325-87e9-6f6cc9389320" (UID: "7d9105dc-6783-4325-87e9-6f6cc9389320"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.542409 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.542450 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7d9105dc-6783-4325-87e9-6f6cc9389320-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.542463 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7d9105dc-6783-4325-87e9-6f6cc9389320-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.542474 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.542485 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-scripts\") on node \"crc\" DevicePath \"\"" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.542501 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4f77\" (UniqueName: \"kubernetes.io/projected/7d9105dc-6783-4325-87e9-6f6cc9389320-kube-api-access-s4f77\") on node \"crc\" DevicePath \"\"" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.567417 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-config-data" (OuterVolumeSpecName: "config-data") pod "7d9105dc-6783-4325-87e9-6f6cc9389320" (UID: "7d9105dc-6783-4325-87e9-6f6cc9389320"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.644954 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d9105dc-6783-4325-87e9-6f6cc9389320-config-data\") on node \"crc\" DevicePath \"\"" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.649357 4910 generic.go:334] "Generic (PLEG): container finished" podID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerID="97a7ebad088f6af984b0e6fb9aeb645cbdd13d94e95a57914af3a7a6d7cbb8ad" exitCode=0 Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.649405 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7d9105dc-6783-4325-87e9-6f6cc9389320","Type":"ContainerDied","Data":"97a7ebad088f6af984b0e6fb9aeb645cbdd13d94e95a57914af3a7a6d7cbb8ad"} Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.649443 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7d9105dc-6783-4325-87e9-6f6cc9389320","Type":"ContainerDied","Data":"3d638b03aeb077804c037203878f165051ee7356c61ee7fd3c64a8a87429c19d"} Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.649445 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.649462 4910 scope.go:117] "RemoveContainer" containerID="c5d8a0fb68e2878cb8b8ca86d73e070b93a474ac96eb7790ea132e94f178ab9c" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.669359 4910 scope.go:117] "RemoveContainer" containerID="a3f4e83aed3b4c26a785d83579d5ab445ed3a45aebb26a6911213620b13bd26b" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.702724 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.704950 4910 scope.go:117] "RemoveContainer" containerID="97a7ebad088f6af984b0e6fb9aeb645cbdd13d94e95a57914af3a7a6d7cbb8ad" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.716386 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.728185 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:36:05 crc kubenswrapper[4910]: E0105 23:36:05.728885 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="sg-core" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.728947 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="sg-core" Jan 05 23:36:05 crc kubenswrapper[4910]: E0105 23:36:05.729017 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="proxy-httpd" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.729065 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="proxy-httpd" Jan 05 23:36:05 crc kubenswrapper[4910]: E0105 23:36:05.729192 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" containerName="dnsmasq-dns" Jan 05 23:36:05 crc 
kubenswrapper[4910]: I0105 23:36:05.729260 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" containerName="dnsmasq-dns" Jan 05 23:36:05 crc kubenswrapper[4910]: E0105 23:36:05.729325 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="ceilometer-central-agent" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.729375 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="ceilometer-central-agent" Jan 05 23:36:05 crc kubenswrapper[4910]: E0105 23:36:05.729426 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="ceilometer-notification-agent" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.729474 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="ceilometer-notification-agent" Jan 05 23:36:05 crc kubenswrapper[4910]: E0105 23:36:05.729535 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" containerName="init" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.729582 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" containerName="init" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.729820 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="ceilometer-central-agent" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.729952 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="ceilometer-notification-agent" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.730015 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="sg-core" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.730078 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" containerName="proxy-httpd" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.730144 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="030ee9ad-b562-41c2-b1e2-f5be9e4e13b0" containerName="dnsmasq-dns" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.730228 4910 scope.go:117] "RemoveContainer" containerID="c5cf4184f71b27c0c31761d57e63cb096a483d57d1127c333686f93eeba5ab56" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.732287 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.736871 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.737087 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.741388 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.768205 4910 scope.go:117] "RemoveContainer" containerID="c5d8a0fb68e2878cb8b8ca86d73e070b93a474ac96eb7790ea132e94f178ab9c" Jan 05 23:36:05 crc kubenswrapper[4910]: E0105 23:36:05.768655 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5d8a0fb68e2878cb8b8ca86d73e070b93a474ac96eb7790ea132e94f178ab9c\": container with ID starting with c5d8a0fb68e2878cb8b8ca86d73e070b93a474ac96eb7790ea132e94f178ab9c not found: ID does not exist" containerID="c5d8a0fb68e2878cb8b8ca86d73e070b93a474ac96eb7790ea132e94f178ab9c" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.768687 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5d8a0fb68e2878cb8b8ca86d73e070b93a474ac96eb7790ea132e94f178ab9c"} err="failed to get container status \"c5d8a0fb68e2878cb8b8ca86d73e070b93a474ac96eb7790ea132e94f178ab9c\": rpc error: code = NotFound desc = could not find container \"c5d8a0fb68e2878cb8b8ca86d73e070b93a474ac96eb7790ea132e94f178ab9c\": container with ID starting with c5d8a0fb68e2878cb8b8ca86d73e070b93a474ac96eb7790ea132e94f178ab9c not found: ID does not exist" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.768711 4910 scope.go:117] "RemoveContainer" containerID="a3f4e83aed3b4c26a785d83579d5ab445ed3a45aebb26a6911213620b13bd26b" Jan 05 23:36:05 crc kubenswrapper[4910]: E0105 23:36:05.768985 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3f4e83aed3b4c26a785d83579d5ab445ed3a45aebb26a6911213620b13bd26b\": container with ID starting with a3f4e83aed3b4c26a785d83579d5ab445ed3a45aebb26a6911213620b13bd26b not found: ID does not exist" containerID="a3f4e83aed3b4c26a785d83579d5ab445ed3a45aebb26a6911213620b13bd26b" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.769022 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3f4e83aed3b4c26a785d83579d5ab445ed3a45aebb26a6911213620b13bd26b"} err="failed to get container status \"a3f4e83aed3b4c26a785d83579d5ab445ed3a45aebb26a6911213620b13bd26b\": rpc error: code = NotFound desc = could not find container \"a3f4e83aed3b4c26a785d83579d5ab445ed3a45aebb26a6911213620b13bd26b\": container with ID starting with a3f4e83aed3b4c26a785d83579d5ab445ed3a45aebb26a6911213620b13bd26b not found: ID does not exist" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.769050 4910 scope.go:117] "RemoveContainer" containerID="97a7ebad088f6af984b0e6fb9aeb645cbdd13d94e95a57914af3a7a6d7cbb8ad" Jan 05 23:36:05 crc kubenswrapper[4910]: E0105 23:36:05.769335 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97a7ebad088f6af984b0e6fb9aeb645cbdd13d94e95a57914af3a7a6d7cbb8ad\": container with ID starting with 97a7ebad088f6af984b0e6fb9aeb645cbdd13d94e95a57914af3a7a6d7cbb8ad not found: ID 
does not exist" containerID="97a7ebad088f6af984b0e6fb9aeb645cbdd13d94e95a57914af3a7a6d7cbb8ad" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.769371 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97a7ebad088f6af984b0e6fb9aeb645cbdd13d94e95a57914af3a7a6d7cbb8ad"} err="failed to get container status \"97a7ebad088f6af984b0e6fb9aeb645cbdd13d94e95a57914af3a7a6d7cbb8ad\": rpc error: code = NotFound desc = could not find container \"97a7ebad088f6af984b0e6fb9aeb645cbdd13d94e95a57914af3a7a6d7cbb8ad\": container with ID starting with 97a7ebad088f6af984b0e6fb9aeb645cbdd13d94e95a57914af3a7a6d7cbb8ad not found: ID does not exist" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.769386 4910 scope.go:117] "RemoveContainer" containerID="c5cf4184f71b27c0c31761d57e63cb096a483d57d1127c333686f93eeba5ab56" Jan 05 23:36:05 crc kubenswrapper[4910]: E0105 23:36:05.769611 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5cf4184f71b27c0c31761d57e63cb096a483d57d1127c333686f93eeba5ab56\": container with ID starting with c5cf4184f71b27c0c31761d57e63cb096a483d57d1127c333686f93eeba5ab56 not found: ID does not exist" containerID="c5cf4184f71b27c0c31761d57e63cb096a483d57d1127c333686f93eeba5ab56" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.769638 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5cf4184f71b27c0c31761d57e63cb096a483d57d1127c333686f93eeba5ab56"} err="failed to get container status \"c5cf4184f71b27c0c31761d57e63cb096a483d57d1127c333686f93eeba5ab56\": rpc error: code = NotFound desc = could not find container \"c5cf4184f71b27c0c31761d57e63cb096a483d57d1127c333686f93eeba5ab56\": container with ID starting with c5cf4184f71b27c0c31761d57e63cb096a483d57d1127c333686f93eeba5ab56 not found: ID does not exist" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.859275 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcced35e-d7c2-4f85-9e90-16ab520684b3-run-httpd\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.859543 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcced35e-d7c2-4f85-9e90-16ab520684b3-scripts\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.859625 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96qlt\" (UniqueName: \"kubernetes.io/projected/dcced35e-d7c2-4f85-9e90-16ab520684b3-kube-api-access-96qlt\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.860188 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcced35e-d7c2-4f85-9e90-16ab520684b3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.860568 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcced35e-d7c2-4f85-9e90-16ab520684b3-config-data\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.860635 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcced35e-d7c2-4f85-9e90-16ab520684b3-log-httpd\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.860785 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dcced35e-d7c2-4f85-9e90-16ab520684b3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.962366 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcced35e-d7c2-4f85-9e90-16ab520684b3-config-data\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.962417 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcced35e-d7c2-4f85-9e90-16ab520684b3-log-httpd\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.962452 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dcced35e-d7c2-4f85-9e90-16ab520684b3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.962487 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcced35e-d7c2-4f85-9e90-16ab520684b3-run-httpd\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.962511 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcced35e-d7c2-4f85-9e90-16ab520684b3-scripts\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.962528 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96qlt\" (UniqueName: \"kubernetes.io/projected/dcced35e-d7c2-4f85-9e90-16ab520684b3-kube-api-access-96qlt\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.962618 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcced35e-d7c2-4f85-9e90-16ab520684b3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 
23:36:05.991920 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcced35e-d7c2-4f85-9e90-16ab520684b3-run-httpd\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.993649 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dcced35e-d7c2-4f85-9e90-16ab520684b3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.997964 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dcced35e-d7c2-4f85-9e90-16ab520684b3-log-httpd\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.998583 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcced35e-d7c2-4f85-9e90-16ab520684b3-scripts\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.999108 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcced35e-d7c2-4f85-9e90-16ab520684b3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:05 crc kubenswrapper[4910]: I0105 23:36:05.999944 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcced35e-d7c2-4f85-9e90-16ab520684b3-config-data\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:06 crc kubenswrapper[4910]: I0105 23:36:06.011241 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96qlt\" (UniqueName: \"kubernetes.io/projected/dcced35e-d7c2-4f85-9e90-16ab520684b3-kube-api-access-96qlt\") pod \"ceilometer-0\" (UID: \"dcced35e-d7c2-4f85-9e90-16ab520684b3\") " pod="openstack/ceilometer-0" Jan 05 23:36:06 crc kubenswrapper[4910]: I0105 23:36:06.071639 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 05 23:36:06 crc kubenswrapper[4910]: I0105 23:36:06.681927 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 05 23:36:06 crc kubenswrapper[4910]: I0105 23:36:06.735617 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d9105dc-6783-4325-87e9-6f6cc9389320" path="/var/lib/kubelet/pods/7d9105dc-6783-4325-87e9-6f6cc9389320/volumes" Jan 05 23:36:07 crc kubenswrapper[4910]: I0105 23:36:07.676715 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcced35e-d7c2-4f85-9e90-16ab520684b3","Type":"ContainerStarted","Data":"6f0e5e6f3ece09cf8de4dab9747a56b632f641c2d17a2f04c60f03ad068abac7"} Jan 05 23:36:07 crc kubenswrapper[4910]: I0105 23:36:07.677041 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcced35e-d7c2-4f85-9e90-16ab520684b3","Type":"ContainerStarted","Data":"fb2fef3802fd34f74d55e54f5a1fa6e436191fdfaec02517adf4810b578c273c"} Jan 05 23:36:08 crc kubenswrapper[4910]: I0105 23:36:08.692874 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcced35e-d7c2-4f85-9e90-16ab520684b3","Type":"ContainerStarted","Data":"2ec8192cef344d4d2adf326b3197a7323f65a37b34a8cfc8763d109d161721d2"} Jan 05 23:36:09 crc kubenswrapper[4910]: I0105 23:36:09.483601 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Jan 05 23:36:09 crc kubenswrapper[4910]: I0105 23:36:09.660658 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Jan 05 23:36:09 crc kubenswrapper[4910]: I0105 23:36:09.672639 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0" Jan 05 23:36:09 crc kubenswrapper[4910]: I0105 23:36:09.704178 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcced35e-d7c2-4f85-9e90-16ab520684b3","Type":"ContainerStarted","Data":"d9cde101d44fba081a0b03a70e46efe44749ae43ed09fca6796809c2eae9f6e3"} Jan 05 23:36:10 crc kubenswrapper[4910]: I0105 23:36:10.717744 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dcced35e-d7c2-4f85-9e90-16ab520684b3","Type":"ContainerStarted","Data":"84c059aec95bcf06f60811a9960a1c0f04b8cc56ce6f679b12616c303f3911ca"} Jan 05 23:36:10 crc kubenswrapper[4910]: I0105 23:36:10.718476 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 05 23:36:10 crc kubenswrapper[4910]: I0105 23:36:10.755694 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.521559307 podStartE2EDuration="5.755670324s" podCreationTimestamp="2026-01-05 23:36:05 +0000 UTC" firstStartedPulling="2026-01-05 23:36:06.677990482 +0000 UTC m=+6298.255488172" lastFinishedPulling="2026-01-05 23:36:09.912101519 +0000 UTC m=+6301.489599189" observedRunningTime="2026-01-05 23:36:10.743662198 +0000 UTC m=+6302.321159878" watchObservedRunningTime="2026-01-05 23:36:10.755670324 +0000 UTC m=+6302.333167994" Jan 05 23:36:12 crc kubenswrapper[4910]: I0105 23:36:12.722002 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:36:13 crc kubenswrapper[4910]: I0105 23:36:13.757486 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"85efc8de819de3d45b60cf4b26ac6a5b91b06bbd1c65b576dff5063a93cada55"} Jan 05 23:36:36 crc kubenswrapper[4910]: I0105 23:36:36.083620 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 05 23:36:48 crc kubenswrapper[4910]: I0105 23:36:48.657861 4910 scope.go:117] "RemoveContainer" containerID="89e008f163364a429cd3cadace1419f216793c201556df0af9a91dfa21e5469e" Jan 05 23:36:48 crc kubenswrapper[4910]: I0105 23:36:48.697688 4910 scope.go:117] "RemoveContainer" containerID="ae8b83f5aa12df97c5b77ab0a4a4ac77e9ed811358667ee2f2457941015acc11" Jan 05 23:36:48 crc kubenswrapper[4910]: I0105 23:36:48.744561 4910 scope.go:117] "RemoveContainer" containerID="81f685f932e5b8ee2f68f44a7008394ac43664d536429818e11fa1588128ed62" Jan 05 23:36:56 crc kubenswrapper[4910]: I0105 23:36:56.945219 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7764dc59f9-rw8cg"] Jan 05 23:36:56 crc kubenswrapper[4910]: I0105 23:36:56.947401 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:56 crc kubenswrapper[4910]: I0105 23:36:56.949685 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1" Jan 05 23:36:56 crc kubenswrapper[4910]: I0105 23:36:56.972711 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7764dc59f9-rw8cg"] Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.068291 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-ovsdbserver-nb\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.068369 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-dns-svc\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.068462 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-openstack-cell1\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.068506 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-ovsdbserver-sb\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.068551 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzfjl\" (UniqueName: \"kubernetes.io/projected/93f41ba6-87ac-4772-9c15-daab77bf642f-kube-api-access-pzfjl\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: 
\"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.068581 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-config\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.171522 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-openstack-cell1\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.173242 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-openstack-cell1\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.173467 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-ovsdbserver-sb\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.173620 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzfjl\" (UniqueName: \"kubernetes.io/projected/93f41ba6-87ac-4772-9c15-daab77bf642f-kube-api-access-pzfjl\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.173710 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-config\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.173805 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-ovsdbserver-nb\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.173883 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-dns-svc\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.174618 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-dns-svc\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 
05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.174635 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-config\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.174749 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-ovsdbserver-sb\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.175932 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-ovsdbserver-nb\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.195251 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzfjl\" (UniqueName: \"kubernetes.io/projected/93f41ba6-87ac-4772-9c15-daab77bf642f-kube-api-access-pzfjl\") pod \"dnsmasq-dns-7764dc59f9-rw8cg\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.272169 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:57 crc kubenswrapper[4910]: I0105 23:36:57.831624 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7764dc59f9-rw8cg"] Jan 05 23:36:58 crc kubenswrapper[4910]: I0105 23:36:58.302879 4910 generic.go:334] "Generic (PLEG): container finished" podID="93f41ba6-87ac-4772-9c15-daab77bf642f" containerID="cce1460fc0d17265e00070687fb0ba1f316fa19aac8386da19e05d7b9c4e2365" exitCode=0 Jan 05 23:36:58 crc kubenswrapper[4910]: I0105 23:36:58.304895 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" event={"ID":"93f41ba6-87ac-4772-9c15-daab77bf642f","Type":"ContainerDied","Data":"cce1460fc0d17265e00070687fb0ba1f316fa19aac8386da19e05d7b9c4e2365"} Jan 05 23:36:58 crc kubenswrapper[4910]: I0105 23:36:58.304956 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" event={"ID":"93f41ba6-87ac-4772-9c15-daab77bf642f","Type":"ContainerStarted","Data":"249eb65e557d7b0e54afe9deffdc9e6ebb3891e893fe126cbaab5c569cdf1986"} Jan 05 23:36:59 crc kubenswrapper[4910]: I0105 23:36:59.317467 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" event={"ID":"93f41ba6-87ac-4772-9c15-daab77bf642f","Type":"ContainerStarted","Data":"2ecdc7a9d201b71225e7d2bc81d061054255db5c4131a9a2724d7405a0832e73"} Jan 05 23:36:59 crc kubenswrapper[4910]: I0105 23:36:59.317782 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:36:59 crc kubenswrapper[4910]: I0105 23:36:59.338110 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" podStartSLOduration=3.338091263 podStartE2EDuration="3.338091263s" podCreationTimestamp="2026-01-05 23:36:56 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:36:59.332588072 +0000 UTC m=+6350.910085742" watchObservedRunningTime="2026-01-05 23:36:59.338091263 +0000 UTC m=+6350.915588933" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.274381 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.396079 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64977c6f6c-bmckg"] Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.396506 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" podUID="395236e1-a608-4655-b98a-80166634b17a" containerName="dnsmasq-dns" containerID="cri-o://997b7bd68313c3820caae4772b4c17ad6252d7a6006895d0a6b20fd7ed7ee588" gracePeriod=10 Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.544009 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-579d455669-7z4vm"] Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.546086 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.591321 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-579d455669-7z4vm"] Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.656486 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-ovsdbserver-sb\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.656674 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-dns-svc\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.656843 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wqbf\" (UniqueName: \"kubernetes.io/projected/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-kube-api-access-9wqbf\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.656888 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-ovsdbserver-nb\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.656983 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-openstack-cell1\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc 
kubenswrapper[4910]: I0105 23:37:07.657040 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-config\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.814368 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-ovsdbserver-sb\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.814442 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-dns-svc\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.814550 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wqbf\" (UniqueName: \"kubernetes.io/projected/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-kube-api-access-9wqbf\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.814573 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-ovsdbserver-nb\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.814622 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-openstack-cell1\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.814641 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-config\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.815767 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-config\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.819940 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-ovsdbserver-nb\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.820539 4910 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-dns-svc\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.821348 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-openstack-cell1\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.823219 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-ovsdbserver-sb\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.868034 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wqbf\" (UniqueName: \"kubernetes.io/projected/0a3c74f1-a391-4472-9ff6-8d72a85f41d5-kube-api-access-9wqbf\") pod \"dnsmasq-dns-579d455669-7z4vm\" (UID: \"0a3c74f1-a391-4472-9ff6-8d72a85f41d5\") " pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:07 crc kubenswrapper[4910]: I0105 23:37:07.882800 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.072740 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.226852 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-dns-svc\") pod \"395236e1-a608-4655-b98a-80166634b17a\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.227364 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-ovsdbserver-nb\") pod \"395236e1-a608-4655-b98a-80166634b17a\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.227408 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zd4p2\" (UniqueName: \"kubernetes.io/projected/395236e1-a608-4655-b98a-80166634b17a-kube-api-access-zd4p2\") pod \"395236e1-a608-4655-b98a-80166634b17a\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.227458 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-config\") pod \"395236e1-a608-4655-b98a-80166634b17a\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.227480 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-ovsdbserver-sb\") pod \"395236e1-a608-4655-b98a-80166634b17a\" (UID: \"395236e1-a608-4655-b98a-80166634b17a\") " Jan 05 23:37:08 
crc kubenswrapper[4910]: I0105 23:37:08.240799 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/395236e1-a608-4655-b98a-80166634b17a-kube-api-access-zd4p2" (OuterVolumeSpecName: "kube-api-access-zd4p2") pod "395236e1-a608-4655-b98a-80166634b17a" (UID: "395236e1-a608-4655-b98a-80166634b17a"). InnerVolumeSpecName "kube-api-access-zd4p2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.330196 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zd4p2\" (UniqueName: \"kubernetes.io/projected/395236e1-a608-4655-b98a-80166634b17a-kube-api-access-zd4p2\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.334814 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "395236e1-a608-4655-b98a-80166634b17a" (UID: "395236e1-a608-4655-b98a-80166634b17a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.348274 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "395236e1-a608-4655-b98a-80166634b17a" (UID: "395236e1-a608-4655-b98a-80166634b17a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.348738 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "395236e1-a608-4655-b98a-80166634b17a" (UID: "395236e1-a608-4655-b98a-80166634b17a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.362633 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-config" (OuterVolumeSpecName: "config") pod "395236e1-a608-4655-b98a-80166634b17a" (UID: "395236e1-a608-4655-b98a-80166634b17a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.433301 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.433552 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.433564 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.433574 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/395236e1-a608-4655-b98a-80166634b17a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.446941 4910 generic.go:334] "Generic (PLEG): container finished" podID="395236e1-a608-4655-b98a-80166634b17a" containerID="997b7bd68313c3820caae4772b4c17ad6252d7a6006895d0a6b20fd7ed7ee588" exitCode=0 Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.446981 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" event={"ID":"395236e1-a608-4655-b98a-80166634b17a","Type":"ContainerDied","Data":"997b7bd68313c3820caae4772b4c17ad6252d7a6006895d0a6b20fd7ed7ee588"} Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.447009 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" event={"ID":"395236e1-a608-4655-b98a-80166634b17a","Type":"ContainerDied","Data":"967a557d1e44cf2b5026bac3638ff46b1a7bceb59f8f0064abff041785280f6b"} Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.447027 4910 scope.go:117] "RemoveContainer" containerID="997b7bd68313c3820caae4772b4c17ad6252d7a6006895d0a6b20fd7ed7ee588" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.447173 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64977c6f6c-bmckg" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.483330 4910 scope.go:117] "RemoveContainer" containerID="a767872039910c88bead140428a66d9e014645a67e3b0a3ca308abbe270ee0a6" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.487371 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64977c6f6c-bmckg"] Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.494902 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-64977c6f6c-bmckg"] Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.508806 4910 scope.go:117] "RemoveContainer" containerID="997b7bd68313c3820caae4772b4c17ad6252d7a6006895d0a6b20fd7ed7ee588" Jan 05 23:37:08 crc kubenswrapper[4910]: E0105 23:37:08.509311 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"997b7bd68313c3820caae4772b4c17ad6252d7a6006895d0a6b20fd7ed7ee588\": container with ID starting with 997b7bd68313c3820caae4772b4c17ad6252d7a6006895d0a6b20fd7ed7ee588 not found: ID does not exist" containerID="997b7bd68313c3820caae4772b4c17ad6252d7a6006895d0a6b20fd7ed7ee588" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.509369 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"997b7bd68313c3820caae4772b4c17ad6252d7a6006895d0a6b20fd7ed7ee588"} err="failed to get container status \"997b7bd68313c3820caae4772b4c17ad6252d7a6006895d0a6b20fd7ed7ee588\": rpc error: code = NotFound desc = could not find container \"997b7bd68313c3820caae4772b4c17ad6252d7a6006895d0a6b20fd7ed7ee588\": container with ID starting with 997b7bd68313c3820caae4772b4c17ad6252d7a6006895d0a6b20fd7ed7ee588 not found: ID does not exist" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.509405 4910 scope.go:117] "RemoveContainer" containerID="a767872039910c88bead140428a66d9e014645a67e3b0a3ca308abbe270ee0a6" Jan 05 23:37:08 crc kubenswrapper[4910]: E0105 23:37:08.509777 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a767872039910c88bead140428a66d9e014645a67e3b0a3ca308abbe270ee0a6\": container with ID starting with a767872039910c88bead140428a66d9e014645a67e3b0a3ca308abbe270ee0a6 not found: ID does not exist" containerID="a767872039910c88bead140428a66d9e014645a67e3b0a3ca308abbe270ee0a6" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.509808 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a767872039910c88bead140428a66d9e014645a67e3b0a3ca308abbe270ee0a6"} err="failed to get container status \"a767872039910c88bead140428a66d9e014645a67e3b0a3ca308abbe270ee0a6\": rpc error: code = NotFound desc = could not find container \"a767872039910c88bead140428a66d9e014645a67e3b0a3ca308abbe270ee0a6\": container with ID starting with a767872039910c88bead140428a66d9e014645a67e3b0a3ca308abbe270ee0a6 not found: ID does not exist" Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.574540 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-579d455669-7z4vm"] Jan 05 23:37:08 crc kubenswrapper[4910]: I0105 23:37:08.761018 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="395236e1-a608-4655-b98a-80166634b17a" path="/var/lib/kubelet/pods/395236e1-a608-4655-b98a-80166634b17a/volumes" Jan 05 23:37:09 crc kubenswrapper[4910]: I0105 23:37:09.461236 4910 generic.go:334] 
"Generic (PLEG): container finished" podID="0a3c74f1-a391-4472-9ff6-8d72a85f41d5" containerID="c995dbbc5455c45a33a8f2b01b0961af2c416a85824d2b9fcca65e4564f41656" exitCode=0 Jan 05 23:37:09 crc kubenswrapper[4910]: I0105 23:37:09.461316 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-579d455669-7z4vm" event={"ID":"0a3c74f1-a391-4472-9ff6-8d72a85f41d5","Type":"ContainerDied","Data":"c995dbbc5455c45a33a8f2b01b0961af2c416a85824d2b9fcca65e4564f41656"} Jan 05 23:37:09 crc kubenswrapper[4910]: I0105 23:37:09.461588 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-579d455669-7z4vm" event={"ID":"0a3c74f1-a391-4472-9ff6-8d72a85f41d5","Type":"ContainerStarted","Data":"a3bbbd2763bc5cca792f5f8563ea2163395a578e7fbacbd31d4e4f4f7e29256c"} Jan 05 23:37:10 crc kubenswrapper[4910]: I0105 23:37:10.481440 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-579d455669-7z4vm" event={"ID":"0a3c74f1-a391-4472-9ff6-8d72a85f41d5","Type":"ContainerStarted","Data":"c6a5e8540ce4c7998a8522b1a424a07a4312fce3c95f6a46ae84554fdb077c46"} Jan 05 23:37:10 crc kubenswrapper[4910]: I0105 23:37:10.482156 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:10 crc kubenswrapper[4910]: I0105 23:37:10.527158 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-579d455669-7z4vm" podStartSLOduration=3.527083831 podStartE2EDuration="3.527083831s" podCreationTimestamp="2026-01-05 23:37:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:37:10.508909868 +0000 UTC m=+6362.086407578" watchObservedRunningTime="2026-01-05 23:37:10.527083831 +0000 UTC m=+6362.104581541" Jan 05 23:37:17 crc kubenswrapper[4910]: I0105 23:37:17.885458 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-579d455669-7z4vm" Jan 05 23:37:17 crc kubenswrapper[4910]: I0105 23:37:17.991230 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7764dc59f9-rw8cg"] Jan 05 23:37:17 crc kubenswrapper[4910]: I0105 23:37:17.991783 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" podUID="93f41ba6-87ac-4772-9c15-daab77bf642f" containerName="dnsmasq-dns" containerID="cri-o://2ecdc7a9d201b71225e7d2bc81d061054255db5c4131a9a2724d7405a0832e73" gracePeriod=10 Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.514935 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.581812 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-config\") pod \"93f41ba6-87ac-4772-9c15-daab77bf642f\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.582083 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-ovsdbserver-sb\") pod \"93f41ba6-87ac-4772-9c15-daab77bf642f\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.582144 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-dns-svc\") pod \"93f41ba6-87ac-4772-9c15-daab77bf642f\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.582237 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzfjl\" (UniqueName: \"kubernetes.io/projected/93f41ba6-87ac-4772-9c15-daab77bf642f-kube-api-access-pzfjl\") pod \"93f41ba6-87ac-4772-9c15-daab77bf642f\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.582279 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-ovsdbserver-nb\") pod \"93f41ba6-87ac-4772-9c15-daab77bf642f\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.582352 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-openstack-cell1\") pod \"93f41ba6-87ac-4772-9c15-daab77bf642f\" (UID: \"93f41ba6-87ac-4772-9c15-daab77bf642f\") " Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.594567 4910 generic.go:334] "Generic (PLEG): container finished" podID="93f41ba6-87ac-4772-9c15-daab77bf642f" containerID="2ecdc7a9d201b71225e7d2bc81d061054255db5c4131a9a2724d7405a0832e73" exitCode=0 Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.594610 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" event={"ID":"93f41ba6-87ac-4772-9c15-daab77bf642f","Type":"ContainerDied","Data":"2ecdc7a9d201b71225e7d2bc81d061054255db5c4131a9a2724d7405a0832e73"} Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.594640 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" event={"ID":"93f41ba6-87ac-4772-9c15-daab77bf642f","Type":"ContainerDied","Data":"249eb65e557d7b0e54afe9deffdc9e6ebb3891e893fe126cbaab5c569cdf1986"} Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.594660 4910 scope.go:117] "RemoveContainer" containerID="2ecdc7a9d201b71225e7d2bc81d061054255db5c4131a9a2724d7405a0832e73" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.594786 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7764dc59f9-rw8cg" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.602665 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93f41ba6-87ac-4772-9c15-daab77bf642f-kube-api-access-pzfjl" (OuterVolumeSpecName: "kube-api-access-pzfjl") pod "93f41ba6-87ac-4772-9c15-daab77bf642f" (UID: "93f41ba6-87ac-4772-9c15-daab77bf642f"). InnerVolumeSpecName "kube-api-access-pzfjl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.645292 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "93f41ba6-87ac-4772-9c15-daab77bf642f" (UID: "93f41ba6-87ac-4772-9c15-daab77bf642f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.653596 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "93f41ba6-87ac-4772-9c15-daab77bf642f" (UID: "93f41ba6-87ac-4772-9c15-daab77bf642f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.653872 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "93f41ba6-87ac-4772-9c15-daab77bf642f" (UID: "93f41ba6-87ac-4772-9c15-daab77bf642f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.662023 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-config" (OuterVolumeSpecName: "config") pod "93f41ba6-87ac-4772-9c15-daab77bf642f" (UID: "93f41ba6-87ac-4772-9c15-daab77bf642f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.671827 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-openstack-cell1" (OuterVolumeSpecName: "openstack-cell1") pod "93f41ba6-87ac-4772-9c15-daab77bf642f" (UID: "93f41ba6-87ac-4772-9c15-daab77bf642f"). InnerVolumeSpecName "openstack-cell1". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.685432 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.685467 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.685479 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzfjl\" (UniqueName: \"kubernetes.io/projected/93f41ba6-87ac-4772-9c15-daab77bf642f-kube-api-access-pzfjl\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.685491 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.685499 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.685508 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93f41ba6-87ac-4772-9c15-daab77bf642f-config\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.747568 4910 scope.go:117] "RemoveContainer" containerID="cce1460fc0d17265e00070687fb0ba1f316fa19aac8386da19e05d7b9c4e2365" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.769823 4910 scope.go:117] "RemoveContainer" containerID="2ecdc7a9d201b71225e7d2bc81d061054255db5c4131a9a2724d7405a0832e73" Jan 05 23:37:18 crc kubenswrapper[4910]: E0105 23:37:18.770411 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ecdc7a9d201b71225e7d2bc81d061054255db5c4131a9a2724d7405a0832e73\": container with ID starting with 2ecdc7a9d201b71225e7d2bc81d061054255db5c4131a9a2724d7405a0832e73 not found: ID does not exist" containerID="2ecdc7a9d201b71225e7d2bc81d061054255db5c4131a9a2724d7405a0832e73" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.770454 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ecdc7a9d201b71225e7d2bc81d061054255db5c4131a9a2724d7405a0832e73"} err="failed to get container status \"2ecdc7a9d201b71225e7d2bc81d061054255db5c4131a9a2724d7405a0832e73\": rpc error: code = NotFound desc = could not find container \"2ecdc7a9d201b71225e7d2bc81d061054255db5c4131a9a2724d7405a0832e73\": container with ID starting with 2ecdc7a9d201b71225e7d2bc81d061054255db5c4131a9a2724d7405a0832e73 not found: ID does not exist" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.770483 4910 scope.go:117] "RemoveContainer" containerID="cce1460fc0d17265e00070687fb0ba1f316fa19aac8386da19e05d7b9c4e2365" Jan 05 23:37:18 crc kubenswrapper[4910]: E0105 23:37:18.770922 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cce1460fc0d17265e00070687fb0ba1f316fa19aac8386da19e05d7b9c4e2365\": container with ID starting with 
cce1460fc0d17265e00070687fb0ba1f316fa19aac8386da19e05d7b9c4e2365 not found: ID does not exist" containerID="cce1460fc0d17265e00070687fb0ba1f316fa19aac8386da19e05d7b9c4e2365" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.770985 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cce1460fc0d17265e00070687fb0ba1f316fa19aac8386da19e05d7b9c4e2365"} err="failed to get container status \"cce1460fc0d17265e00070687fb0ba1f316fa19aac8386da19e05d7b9c4e2365\": rpc error: code = NotFound desc = could not find container \"cce1460fc0d17265e00070687fb0ba1f316fa19aac8386da19e05d7b9c4e2365\": container with ID starting with cce1460fc0d17265e00070687fb0ba1f316fa19aac8386da19e05d7b9c4e2365 not found: ID does not exist" Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.916883 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7764dc59f9-rw8cg"] Jan 05 23:37:18 crc kubenswrapper[4910]: I0105 23:37:18.924470 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7764dc59f9-rw8cg"] Jan 05 23:37:20 crc kubenswrapper[4910]: I0105 23:37:20.743641 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93f41ba6-87ac-4772-9c15-daab77bf642f" path="/var/lib/kubelet/pods/93f41ba6-87ac-4772-9c15-daab77bf642f/volumes" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.399314 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7"] Jan 05 23:37:29 crc kubenswrapper[4910]: E0105 23:37:29.400837 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="395236e1-a608-4655-b98a-80166634b17a" containerName="dnsmasq-dns" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.400855 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="395236e1-a608-4655-b98a-80166634b17a" containerName="dnsmasq-dns" Jan 05 23:37:29 crc kubenswrapper[4910]: E0105 23:37:29.400883 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="395236e1-a608-4655-b98a-80166634b17a" containerName="init" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.400889 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="395236e1-a608-4655-b98a-80166634b17a" containerName="init" Jan 05 23:37:29 crc kubenswrapper[4910]: E0105 23:37:29.400897 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93f41ba6-87ac-4772-9c15-daab77bf642f" containerName="init" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.400905 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="93f41ba6-87ac-4772-9c15-daab77bf642f" containerName="init" Jan 05 23:37:29 crc kubenswrapper[4910]: E0105 23:37:29.400939 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93f41ba6-87ac-4772-9c15-daab77bf642f" containerName="dnsmasq-dns" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.400944 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="93f41ba6-87ac-4772-9c15-daab77bf642f" containerName="dnsmasq-dns" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.401190 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="395236e1-a608-4655-b98a-80166634b17a" containerName="dnsmasq-dns" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.401207 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="93f41ba6-87ac-4772-9c15-daab77bf642f" containerName="dnsmasq-dns" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.402083 4910 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.406176 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.406454 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.406625 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-s4f5x" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.408861 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7"] Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.415461 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.488238 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.488340 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.488387 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lh8jz\" (UniqueName: \"kubernetes.io/projected/8a544f9e-9220-4cea-8c49-09d188124708-kube-api-access-lh8jz\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.488455 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.488586 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.591641 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.591739 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.591775 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lh8jz\" (UniqueName: \"kubernetes.io/projected/8a544f9e-9220-4cea-8c49-09d188124708-kube-api-access-lh8jz\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.591830 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.592001 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.599381 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.601859 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.602187 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc 
kubenswrapper[4910]: I0105 23:37:29.613580 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.613852 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lh8jz\" (UniqueName: \"kubernetes.io/projected/8a544f9e-9220-4cea-8c49-09d188124708-kube-api-access-lh8jz\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:29 crc kubenswrapper[4910]: I0105 23:37:29.737244 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:30 crc kubenswrapper[4910]: I0105 23:37:30.324298 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7"] Jan 05 23:37:30 crc kubenswrapper[4910]: I0105 23:37:30.330643 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 23:37:30 crc kubenswrapper[4910]: I0105 23:37:30.759508 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" event={"ID":"8a544f9e-9220-4cea-8c49-09d188124708","Type":"ContainerStarted","Data":"47b83a8344f7d029b589d1d0a23ee7f5f7951ce610c6e5b41fff88cc78bc2118"} Jan 05 23:37:42 crc kubenswrapper[4910]: I0105 23:37:42.901983 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" event={"ID":"8a544f9e-9220-4cea-8c49-09d188124708","Type":"ContainerStarted","Data":"99d6c62501cbbf77de8cbf7c385feb08ee8afb211cd221843a1d6eb0b2c77d98"} Jan 05 23:37:42 crc kubenswrapper[4910]: I0105 23:37:42.932214 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" podStartSLOduration=2.559124767 podStartE2EDuration="13.932161126s" podCreationTimestamp="2026-01-05 23:37:29 +0000 UTC" firstStartedPulling="2026-01-05 23:37:30.330184979 +0000 UTC m=+6381.907682649" lastFinishedPulling="2026-01-05 23:37:41.703221328 +0000 UTC m=+6393.280719008" observedRunningTime="2026-01-05 23:37:42.922188728 +0000 UTC m=+6394.499686418" watchObservedRunningTime="2026-01-05 23:37:42.932161126 +0000 UTC m=+6394.509658806" Jan 05 23:37:48 crc kubenswrapper[4910]: I0105 23:37:48.918454 4910 scope.go:117] "RemoveContainer" containerID="0ff158ca14212e856e6f7fb8b45bd0287ea58054ef34a23d5726d253b6264066" Jan 05 23:37:49 crc kubenswrapper[4910]: I0105 23:37:49.132562 4910 scope.go:117] "RemoveContainer" containerID="a56ac7db0904879bece7f3b7232931532b1c5b680aefe44b5532385b4cdde7fe" Jan 05 23:37:51 crc kubenswrapper[4910]: I0105 23:37:51.027552 4910 generic.go:334] "Generic (PLEG): container finished" podID="8a544f9e-9220-4cea-8c49-09d188124708" containerID="99d6c62501cbbf77de8cbf7c385feb08ee8afb211cd221843a1d6eb0b2c77d98" exitCode=2 Jan 05 23:37:51 crc kubenswrapper[4910]: I0105 
23:37:51.027770 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" event={"ID":"8a544f9e-9220-4cea-8c49-09d188124708","Type":"ContainerDied","Data":"99d6c62501cbbf77de8cbf7c385feb08ee8afb211cd221843a1d6eb0b2c77d98"} Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.656635 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.821927 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-ssh-key\") pod \"8a544f9e-9220-4cea-8c49-09d188124708\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.822045 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-ceph\") pod \"8a544f9e-9220-4cea-8c49-09d188124708\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.822278 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-inventory\") pod \"8a544f9e-9220-4cea-8c49-09d188124708\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.822416 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lh8jz\" (UniqueName: \"kubernetes.io/projected/8a544f9e-9220-4cea-8c49-09d188124708-kube-api-access-lh8jz\") pod \"8a544f9e-9220-4cea-8c49-09d188124708\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.822587 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-pre-adoption-validation-combined-ca-bundle\") pod \"8a544f9e-9220-4cea-8c49-09d188124708\" (UID: \"8a544f9e-9220-4cea-8c49-09d188124708\") " Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.828370 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-pre-adoption-validation-combined-ca-bundle" (OuterVolumeSpecName: "pre-adoption-validation-combined-ca-bundle") pod "8a544f9e-9220-4cea-8c49-09d188124708" (UID: "8a544f9e-9220-4cea-8c49-09d188124708"). InnerVolumeSpecName "pre-adoption-validation-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.828562 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a544f9e-9220-4cea-8c49-09d188124708-kube-api-access-lh8jz" (OuterVolumeSpecName: "kube-api-access-lh8jz") pod "8a544f9e-9220-4cea-8c49-09d188124708" (UID: "8a544f9e-9220-4cea-8c49-09d188124708"). InnerVolumeSpecName "kube-api-access-lh8jz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.829274 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-ceph" (OuterVolumeSpecName: "ceph") pod "8a544f9e-9220-4cea-8c49-09d188124708" (UID: "8a544f9e-9220-4cea-8c49-09d188124708"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.864217 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8a544f9e-9220-4cea-8c49-09d188124708" (UID: "8a544f9e-9220-4cea-8c49-09d188124708"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.881379 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-inventory" (OuterVolumeSpecName: "inventory") pod "8a544f9e-9220-4cea-8c49-09d188124708" (UID: "8a544f9e-9220-4cea-8c49-09d188124708"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.925946 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-ssh-key\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.926374 4910 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-ceph\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.926518 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-inventory\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.926636 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lh8jz\" (UniqueName: \"kubernetes.io/projected/8a544f9e-9220-4cea-8c49-09d188124708-kube-api-access-lh8jz\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:52 crc kubenswrapper[4910]: I0105 23:37:52.926782 4910 reconciler_common.go:293] "Volume detached for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a544f9e-9220-4cea-8c49-09d188124708-pre-adoption-validation-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:37:53 crc kubenswrapper[4910]: I0105 23:37:53.055018 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" event={"ID":"8a544f9e-9220-4cea-8c49-09d188124708","Type":"ContainerDied","Data":"47b83a8344f7d029b589d1d0a23ee7f5f7951ce610c6e5b41fff88cc78bc2118"} Jan 05 23:37:53 crc kubenswrapper[4910]: I0105 23:37:53.055375 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47b83a8344f7d029b589d1d0a23ee7f5f7951ce610c6e5b41fff88cc78bc2118" Jan 05 23:37:53 crc kubenswrapper[4910]: I0105 23:37:53.055205 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.048837 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt"] Jan 05 23:38:00 crc kubenswrapper[4910]: E0105 23:38:00.050341 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a544f9e-9220-4cea-8c49-09d188124708" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.050367 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a544f9e-9220-4cea-8c49-09d188124708" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.050803 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a544f9e-9220-4cea-8c49-09d188124708" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.052931 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.060383 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.061785 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-s4f5x" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.067547 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.067855 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.083416 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt"] Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.238373 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.238633 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.238728 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m67t9\" (UniqueName: \"kubernetes.io/projected/e10e76ce-5721-496e-93bc-b3566d1a3d8e-kube-api-access-m67t9\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " 
pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.238860 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.238901 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.341692 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.342178 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m67t9\" (UniqueName: \"kubernetes.io/projected/e10e76ce-5721-496e-93bc-b3566d1a3d8e-kube-api-access-m67t9\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.342965 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.343998 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.344531 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.351042 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-ceph\") pod 
\"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.351199 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.352050 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.352881 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.369351 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m67t9\" (UniqueName: \"kubernetes.io/projected/e10e76ce-5721-496e-93bc-b3566d1a3d8e-kube-api-access-m67t9\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:00 crc kubenswrapper[4910]: I0105 23:38:00.389007 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:01 crc kubenswrapper[4910]: I0105 23:38:01.058964 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt"] Jan 05 23:38:01 crc kubenswrapper[4910]: I0105 23:38:01.170356 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" event={"ID":"e10e76ce-5721-496e-93bc-b3566d1a3d8e","Type":"ContainerStarted","Data":"a8f6a68f55b9086919e5be425534be385cc39f51d079d3dc1aa0c3d22531654b"} Jan 05 23:38:02 crc kubenswrapper[4910]: I0105 23:38:02.188000 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" event={"ID":"e10e76ce-5721-496e-93bc-b3566d1a3d8e","Type":"ContainerStarted","Data":"df711875e48b306ee61675364d8cc03543ba50f2d9938263b8d80fec2b2ba0cb"} Jan 05 23:38:02 crc kubenswrapper[4910]: I0105 23:38:02.225705 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" podStartSLOduration=1.690518328 podStartE2EDuration="2.225678466s" podCreationTimestamp="2026-01-05 23:38:00 +0000 UTC" firstStartedPulling="2026-01-05 23:38:01.061793408 +0000 UTC m=+6412.639291098" lastFinishedPulling="2026-01-05 23:38:01.596953556 +0000 UTC m=+6413.174451236" observedRunningTime="2026-01-05 23:38:02.209607012 +0000 UTC m=+6413.787104722" watchObservedRunningTime="2026-01-05 23:38:02.225678466 +0000 UTC m=+6413.803176146" Jan 05 23:38:11 crc kubenswrapper[4910]: I0105 23:38:11.357561 4910 generic.go:334] "Generic (PLEG): container finished" podID="e10e76ce-5721-496e-93bc-b3566d1a3d8e" containerID="df711875e48b306ee61675364d8cc03543ba50f2d9938263b8d80fec2b2ba0cb" exitCode=2 Jan 05 23:38:11 crc kubenswrapper[4910]: I0105 23:38:11.358303 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" event={"ID":"e10e76ce-5721-496e-93bc-b3566d1a3d8e","Type":"ContainerDied","Data":"df711875e48b306ee61675364d8cc03543ba50f2d9938263b8d80fec2b2ba0cb"} Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.873528 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.879414 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-inventory\") pod \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.880330 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-ceph\") pod \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.880597 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m67t9\" (UniqueName: \"kubernetes.io/projected/e10e76ce-5721-496e-93bc-b3566d1a3d8e-kube-api-access-m67t9\") pod \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.880676 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-ssh-key\") pod \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.880703 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-pre-adoption-validation-combined-ca-bundle\") pod \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\" (UID: \"e10e76ce-5721-496e-93bc-b3566d1a3d8e\") " Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.885716 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-pre-adoption-validation-combined-ca-bundle" (OuterVolumeSpecName: "pre-adoption-validation-combined-ca-bundle") pod "e10e76ce-5721-496e-93bc-b3566d1a3d8e" (UID: "e10e76ce-5721-496e-93bc-b3566d1a3d8e"). InnerVolumeSpecName "pre-adoption-validation-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.885778 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e10e76ce-5721-496e-93bc-b3566d1a3d8e-kube-api-access-m67t9" (OuterVolumeSpecName: "kube-api-access-m67t9") pod "e10e76ce-5721-496e-93bc-b3566d1a3d8e" (UID: "e10e76ce-5721-496e-93bc-b3566d1a3d8e"). InnerVolumeSpecName "kube-api-access-m67t9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.888139 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-ceph" (OuterVolumeSpecName: "ceph") pod "e10e76ce-5721-496e-93bc-b3566d1a3d8e" (UID: "e10e76ce-5721-496e-93bc-b3566d1a3d8e"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.926158 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e10e76ce-5721-496e-93bc-b3566d1a3d8e" (UID: "e10e76ce-5721-496e-93bc-b3566d1a3d8e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.928105 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-inventory" (OuterVolumeSpecName: "inventory") pod "e10e76ce-5721-496e-93bc-b3566d1a3d8e" (UID: "e10e76ce-5721-496e-93bc-b3566d1a3d8e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.983561 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m67t9\" (UniqueName: \"kubernetes.io/projected/e10e76ce-5721-496e-93bc-b3566d1a3d8e-kube-api-access-m67t9\") on node \"crc\" DevicePath \"\"" Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.983598 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-ssh-key\") on node \"crc\" DevicePath \"\"" Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.983609 4910 reconciler_common.go:293] "Volume detached for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-pre-adoption-validation-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.983622 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-inventory\") on node \"crc\" DevicePath \"\"" Jan 05 23:38:12 crc kubenswrapper[4910]: I0105 23:38:12.983632 4910 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e10e76ce-5721-496e-93bc-b3566d1a3d8e-ceph\") on node \"crc\" DevicePath \"\"" Jan 05 23:38:13 crc kubenswrapper[4910]: I0105 23:38:13.383229 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" event={"ID":"e10e76ce-5721-496e-93bc-b3566d1a3d8e","Type":"ContainerDied","Data":"a8f6a68f55b9086919e5be425534be385cc39f51d079d3dc1aa0c3d22531654b"} Jan 05 23:38:13 crc kubenswrapper[4910]: I0105 23:38:13.383610 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a8f6a68f55b9086919e5be425534be385cc39f51d079d3dc1aa0c3d22531654b" Jan 05 23:38:13 crc kubenswrapper[4910]: I0105 23:38:13.383313 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.239788 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2sdfk"] Jan 05 23:38:19 crc kubenswrapper[4910]: E0105 23:38:19.241055 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e10e76ce-5721-496e-93bc-b3566d1a3d8e" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.241077 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e10e76ce-5721-496e-93bc-b3566d1a3d8e" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.241393 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e10e76ce-5721-496e-93bc-b3566d1a3d8e" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.243591 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.253036 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2sdfk"] Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.254996 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4k5wz\" (UniqueName: \"kubernetes.io/projected/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-kube-api-access-4k5wz\") pod \"community-operators-2sdfk\" (UID: \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\") " pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.255095 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-utilities\") pod \"community-operators-2sdfk\" (UID: \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\") " pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.255224 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-catalog-content\") pod \"community-operators-2sdfk\" (UID: \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\") " pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.368810 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4k5wz\" (UniqueName: \"kubernetes.io/projected/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-kube-api-access-4k5wz\") pod \"community-operators-2sdfk\" (UID: \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\") " pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.369214 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-utilities\") pod \"community-operators-2sdfk\" (UID: \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\") " pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.369314 4910 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-catalog-content\") pod \"community-operators-2sdfk\" (UID: \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\") " pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.370069 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-catalog-content\") pod \"community-operators-2sdfk\" (UID: \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\") " pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.371596 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-utilities\") pod \"community-operators-2sdfk\" (UID: \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\") " pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.392265 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4k5wz\" (UniqueName: \"kubernetes.io/projected/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-kube-api-access-4k5wz\") pod \"community-operators-2sdfk\" (UID: \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\") " pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:19 crc kubenswrapper[4910]: I0105 23:38:19.609964 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:20 crc kubenswrapper[4910]: I0105 23:38:20.096974 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2sdfk"] Jan 05 23:38:20 crc kubenswrapper[4910]: W0105 23:38:20.103285 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podba1ede8e_bc65_45f9_828c_1b440cfe9c98.slice/crio-f64a92ea1adc5c8b39e6f86dd2e930e0dac874df92e5055d1cb0ae54ace64b28 WatchSource:0}: Error finding container f64a92ea1adc5c8b39e6f86dd2e930e0dac874df92e5055d1cb0ae54ace64b28: Status 404 returned error can't find the container with id f64a92ea1adc5c8b39e6f86dd2e930e0dac874df92e5055d1cb0ae54ace64b28 Jan 05 23:38:20 crc kubenswrapper[4910]: I0105 23:38:20.466505 4910 generic.go:334] "Generic (PLEG): container finished" podID="ba1ede8e-bc65-45f9-828c-1b440cfe9c98" containerID="8df2fc279c8cd688b11dd85691ba0d7818ae59e66769142e9e9ef34b3479c735" exitCode=0 Jan 05 23:38:20 crc kubenswrapper[4910]: I0105 23:38:20.467243 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2sdfk" event={"ID":"ba1ede8e-bc65-45f9-828c-1b440cfe9c98","Type":"ContainerDied","Data":"8df2fc279c8cd688b11dd85691ba0d7818ae59e66769142e9e9ef34b3479c735"} Jan 05 23:38:20 crc kubenswrapper[4910]: I0105 23:38:20.468035 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2sdfk" event={"ID":"ba1ede8e-bc65-45f9-828c-1b440cfe9c98","Type":"ContainerStarted","Data":"f64a92ea1adc5c8b39e6f86dd2e930e0dac874df92e5055d1cb0ae54ace64b28"} Jan 05 23:38:20 crc kubenswrapper[4910]: E0105 23:38:20.476782 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podba1ede8e_bc65_45f9_828c_1b440cfe9c98.slice/crio-conmon-8df2fc279c8cd688b11dd85691ba0d7818ae59e66769142e9e9ef34b3479c735.scope\": RecentStats: unable to find data in memory cache]" Jan 05 23:38:21 crc kubenswrapper[4910]: I0105 23:38:21.479616 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2sdfk" event={"ID":"ba1ede8e-bc65-45f9-828c-1b440cfe9c98","Type":"ContainerStarted","Data":"bdf7be55eff7972ce6c2cc61eb670e58d9cde1fef36aef5c78da3f3d8ce10c3b"} Jan 05 23:38:22 crc kubenswrapper[4910]: I0105 23:38:22.496110 4910 generic.go:334] "Generic (PLEG): container finished" podID="ba1ede8e-bc65-45f9-828c-1b440cfe9c98" containerID="bdf7be55eff7972ce6c2cc61eb670e58d9cde1fef36aef5c78da3f3d8ce10c3b" exitCode=0 Jan 05 23:38:22 crc kubenswrapper[4910]: I0105 23:38:22.496286 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2sdfk" event={"ID":"ba1ede8e-bc65-45f9-828c-1b440cfe9c98","Type":"ContainerDied","Data":"bdf7be55eff7972ce6c2cc61eb670e58d9cde1fef36aef5c78da3f3d8ce10c3b"} Jan 05 23:38:23 crc kubenswrapper[4910]: I0105 23:38:23.509431 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2sdfk" event={"ID":"ba1ede8e-bc65-45f9-828c-1b440cfe9c98","Type":"ContainerStarted","Data":"9a98a3260fa3425fab3b392473083930730a5018fb9a91f62c719668429657f3"} Jan 05 23:38:23 crc kubenswrapper[4910]: I0105 23:38:23.536921 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2sdfk" podStartSLOduration=2.007286443 podStartE2EDuration="4.536905272s" podCreationTimestamp="2026-01-05 23:38:19 +0000 UTC" firstStartedPulling="2026-01-05 23:38:20.469308998 +0000 UTC m=+6432.046806678" lastFinishedPulling="2026-01-05 23:38:22.998927837 +0000 UTC m=+6434.576425507" observedRunningTime="2026-01-05 23:38:23.534473734 +0000 UTC m=+6435.111971394" watchObservedRunningTime="2026-01-05 23:38:23.536905272 +0000 UTC m=+6435.114402952" Jan 05 23:38:29 crc kubenswrapper[4910]: I0105 23:38:29.610211 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:29 crc kubenswrapper[4910]: I0105 23:38:29.611008 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:29 crc kubenswrapper[4910]: I0105 23:38:29.695693 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:30 crc kubenswrapper[4910]: I0105 23:38:30.695452 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:30 crc kubenswrapper[4910]: I0105 23:38:30.765086 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2sdfk"] Jan 05 23:38:32 crc kubenswrapper[4910]: I0105 23:38:32.631596 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2sdfk" podUID="ba1ede8e-bc65-45f9-828c-1b440cfe9c98" containerName="registry-server" containerID="cri-o://9a98a3260fa3425fab3b392473083930730a5018fb9a91f62c719668429657f3" gracePeriod=2 Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.197992 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.338573 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-catalog-content\") pod \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\" (UID: \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\") " Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.339052 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4k5wz\" (UniqueName: \"kubernetes.io/projected/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-kube-api-access-4k5wz\") pod \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\" (UID: \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\") " Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.339105 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-utilities\") pod \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\" (UID: \"ba1ede8e-bc65-45f9-828c-1b440cfe9c98\") " Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.341840 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-utilities" (OuterVolumeSpecName: "utilities") pod "ba1ede8e-bc65-45f9-828c-1b440cfe9c98" (UID: "ba1ede8e-bc65-45f9-828c-1b440cfe9c98"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.348678 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-kube-api-access-4k5wz" (OuterVolumeSpecName: "kube-api-access-4k5wz") pod "ba1ede8e-bc65-45f9-828c-1b440cfe9c98" (UID: "ba1ede8e-bc65-45f9-828c-1b440cfe9c98"). InnerVolumeSpecName "kube-api-access-4k5wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.396873 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ba1ede8e-bc65-45f9-828c-1b440cfe9c98" (UID: "ba1ede8e-bc65-45f9-828c-1b440cfe9c98"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.442688 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.442987 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.443149 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4k5wz\" (UniqueName: \"kubernetes.io/projected/ba1ede8e-bc65-45f9-828c-1b440cfe9c98-kube-api-access-4k5wz\") on node \"crc\" DevicePath \"\"" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.648337 4910 generic.go:334] "Generic (PLEG): container finished" podID="ba1ede8e-bc65-45f9-828c-1b440cfe9c98" containerID="9a98a3260fa3425fab3b392473083930730a5018fb9a91f62c719668429657f3" exitCode=0 Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.648380 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2sdfk" event={"ID":"ba1ede8e-bc65-45f9-828c-1b440cfe9c98","Type":"ContainerDied","Data":"9a98a3260fa3425fab3b392473083930730a5018fb9a91f62c719668429657f3"} Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.648407 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2sdfk" event={"ID":"ba1ede8e-bc65-45f9-828c-1b440cfe9c98","Type":"ContainerDied","Data":"f64a92ea1adc5c8b39e6f86dd2e930e0dac874df92e5055d1cb0ae54ace64b28"} Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.648439 4910 scope.go:117] "RemoveContainer" containerID="9a98a3260fa3425fab3b392473083930730a5018fb9a91f62c719668429657f3" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.649958 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2sdfk" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.678400 4910 scope.go:117] "RemoveContainer" containerID="bdf7be55eff7972ce6c2cc61eb670e58d9cde1fef36aef5c78da3f3d8ce10c3b" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.699853 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2sdfk"] Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.712987 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2sdfk"] Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.736663 4910 scope.go:117] "RemoveContainer" containerID="8df2fc279c8cd688b11dd85691ba0d7818ae59e66769142e9e9ef34b3479c735" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.771539 4910 scope.go:117] "RemoveContainer" containerID="9a98a3260fa3425fab3b392473083930730a5018fb9a91f62c719668429657f3" Jan 05 23:38:33 crc kubenswrapper[4910]: E0105 23:38:33.772389 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a98a3260fa3425fab3b392473083930730a5018fb9a91f62c719668429657f3\": container with ID starting with 9a98a3260fa3425fab3b392473083930730a5018fb9a91f62c719668429657f3 not found: ID does not exist" containerID="9a98a3260fa3425fab3b392473083930730a5018fb9a91f62c719668429657f3" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.772435 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a98a3260fa3425fab3b392473083930730a5018fb9a91f62c719668429657f3"} err="failed to get container status \"9a98a3260fa3425fab3b392473083930730a5018fb9a91f62c719668429657f3\": rpc error: code = NotFound desc = could not find container \"9a98a3260fa3425fab3b392473083930730a5018fb9a91f62c719668429657f3\": container with ID starting with 9a98a3260fa3425fab3b392473083930730a5018fb9a91f62c719668429657f3 not found: ID does not exist" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.772470 4910 scope.go:117] "RemoveContainer" containerID="bdf7be55eff7972ce6c2cc61eb670e58d9cde1fef36aef5c78da3f3d8ce10c3b" Jan 05 23:38:33 crc kubenswrapper[4910]: E0105 23:38:33.773004 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdf7be55eff7972ce6c2cc61eb670e58d9cde1fef36aef5c78da3f3d8ce10c3b\": container with ID starting with bdf7be55eff7972ce6c2cc61eb670e58d9cde1fef36aef5c78da3f3d8ce10c3b not found: ID does not exist" containerID="bdf7be55eff7972ce6c2cc61eb670e58d9cde1fef36aef5c78da3f3d8ce10c3b" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.773043 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdf7be55eff7972ce6c2cc61eb670e58d9cde1fef36aef5c78da3f3d8ce10c3b"} err="failed to get container status \"bdf7be55eff7972ce6c2cc61eb670e58d9cde1fef36aef5c78da3f3d8ce10c3b\": rpc error: code = NotFound desc = could not find container \"bdf7be55eff7972ce6c2cc61eb670e58d9cde1fef36aef5c78da3f3d8ce10c3b\": container with ID starting with bdf7be55eff7972ce6c2cc61eb670e58d9cde1fef36aef5c78da3f3d8ce10c3b not found: ID does not exist" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.773069 4910 scope.go:117] "RemoveContainer" containerID="8df2fc279c8cd688b11dd85691ba0d7818ae59e66769142e9e9ef34b3479c735" Jan 05 23:38:33 crc kubenswrapper[4910]: E0105 23:38:33.773603 4910 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"8df2fc279c8cd688b11dd85691ba0d7818ae59e66769142e9e9ef34b3479c735\": container with ID starting with 8df2fc279c8cd688b11dd85691ba0d7818ae59e66769142e9e9ef34b3479c735 not found: ID does not exist" containerID="8df2fc279c8cd688b11dd85691ba0d7818ae59e66769142e9e9ef34b3479c735" Jan 05 23:38:33 crc kubenswrapper[4910]: I0105 23:38:33.773627 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8df2fc279c8cd688b11dd85691ba0d7818ae59e66769142e9e9ef34b3479c735"} err="failed to get container status \"8df2fc279c8cd688b11dd85691ba0d7818ae59e66769142e9e9ef34b3479c735\": rpc error: code = NotFound desc = could not find container \"8df2fc279c8cd688b11dd85691ba0d7818ae59e66769142e9e9ef34b3479c735\": container with ID starting with 8df2fc279c8cd688b11dd85691ba0d7818ae59e66769142e9e9ef34b3479c735 not found: ID does not exist" Jan 05 23:38:34 crc kubenswrapper[4910]: I0105 23:38:34.740694 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba1ede8e-bc65-45f9-828c-1b440cfe9c98" path="/var/lib/kubelet/pods/ba1ede8e-bc65-45f9-828c-1b440cfe9c98/volumes" Jan 05 23:38:40 crc kubenswrapper[4910]: I0105 23:38:40.952446 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:38:40 crc kubenswrapper[4910]: I0105 23:38:40.953654 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:38:46 crc kubenswrapper[4910]: I0105 23:38:46.064927 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-create-7hvhq"] Jan 05 23:38:46 crc kubenswrapper[4910]: I0105 23:38:46.077714 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-create-7hvhq"] Jan 05 23:38:46 crc kubenswrapper[4910]: I0105 23:38:46.744219 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c52cd7ca-bfd0-4427-96c8-2bb374ac756d" path="/var/lib/kubelet/pods/c52cd7ca-bfd0-4427-96c8-2bb374ac756d/volumes" Jan 05 23:38:48 crc kubenswrapper[4910]: I0105 23:38:48.069187 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-5828-account-create-update-f98gq"] Jan 05 23:38:48 crc kubenswrapper[4910]: I0105 23:38:48.076323 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-5828-account-create-update-f98gq"] Jan 05 23:38:48 crc kubenswrapper[4910]: I0105 23:38:48.738185 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4f2da7e-a606-4a4e-bfed-8605e842ccbc" path="/var/lib/kubelet/pods/b4f2da7e-a606-4a4e-bfed-8605e842ccbc/volumes" Jan 05 23:38:49 crc kubenswrapper[4910]: I0105 23:38:49.226662 4910 scope.go:117] "RemoveContainer" containerID="a434e15b571fd7736aae4419c8f461fdf4b437a2181e36fd4d09cb5f516b7895" Jan 05 23:38:49 crc kubenswrapper[4910]: I0105 23:38:49.263716 4910 scope.go:117] "RemoveContainer" containerID="7a6156750d98992bba0b583a4ff8f76dd35d2da6b068aec266d478435baf5ef8" Jan 05 23:38:53 crc kubenswrapper[4910]: I0105 23:38:53.031116 4910 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/octavia-persistence-db-create-6vb2b"] Jan 05 23:38:53 crc kubenswrapper[4910]: I0105 23:38:53.038231 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-persistence-db-create-6vb2b"] Jan 05 23:38:54 crc kubenswrapper[4910]: I0105 23:38:54.068212 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-c475-account-create-update-7gdht"] Jan 05 23:38:54 crc kubenswrapper[4910]: I0105 23:38:54.090623 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-c475-account-create-update-7gdht"] Jan 05 23:38:54 crc kubenswrapper[4910]: I0105 23:38:54.736104 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="437835eb-8eda-421e-8346-f8068f46e658" path="/var/lib/kubelet/pods/437835eb-8eda-421e-8346-f8068f46e658/volumes" Jan 05 23:38:54 crc kubenswrapper[4910]: I0105 23:38:54.737887 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e077484e-3a30-46f8-abf2-dc5d267fc72a" path="/var/lib/kubelet/pods/e077484e-3a30-46f8-abf2-dc5d267fc72a/volumes" Jan 05 23:39:10 crc kubenswrapper[4910]: I0105 23:39:10.952824 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:39:10 crc kubenswrapper[4910]: I0105 23:39:10.953433 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.613729 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-566mg/must-gather-5tnhd"] Jan 05 23:39:18 crc kubenswrapper[4910]: E0105 23:39:18.614682 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba1ede8e-bc65-45f9-828c-1b440cfe9c98" containerName="extract-utilities" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.614699 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba1ede8e-bc65-45f9-828c-1b440cfe9c98" containerName="extract-utilities" Jan 05 23:39:18 crc kubenswrapper[4910]: E0105 23:39:18.614722 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba1ede8e-bc65-45f9-828c-1b440cfe9c98" containerName="registry-server" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.614728 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba1ede8e-bc65-45f9-828c-1b440cfe9c98" containerName="registry-server" Jan 05 23:39:18 crc kubenswrapper[4910]: E0105 23:39:18.614743 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba1ede8e-bc65-45f9-828c-1b440cfe9c98" containerName="extract-content" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.614749 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba1ede8e-bc65-45f9-828c-1b440cfe9c98" containerName="extract-content" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.614965 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba1ede8e-bc65-45f9-828c-1b440cfe9c98" containerName="registry-server" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.617597 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-566mg/must-gather-5tnhd" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.619935 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-566mg"/"default-dockercfg-cb2vs" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.620222 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-566mg"/"kube-root-ca.crt" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.621144 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-566mg"/"openshift-service-ca.crt" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.632671 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-566mg/must-gather-5tnhd"] Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.789504 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdqhc\" (UniqueName: \"kubernetes.io/projected/3703c96e-c6c7-4742-932f-9943b276b9d2-kube-api-access-sdqhc\") pod \"must-gather-5tnhd\" (UID: \"3703c96e-c6c7-4742-932f-9943b276b9d2\") " pod="openshift-must-gather-566mg/must-gather-5tnhd" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.789602 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3703c96e-c6c7-4742-932f-9943b276b9d2-must-gather-output\") pod \"must-gather-5tnhd\" (UID: \"3703c96e-c6c7-4742-932f-9943b276b9d2\") " pod="openshift-must-gather-566mg/must-gather-5tnhd" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.891815 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdqhc\" (UniqueName: \"kubernetes.io/projected/3703c96e-c6c7-4742-932f-9943b276b9d2-kube-api-access-sdqhc\") pod \"must-gather-5tnhd\" (UID: \"3703c96e-c6c7-4742-932f-9943b276b9d2\") " pod="openshift-must-gather-566mg/must-gather-5tnhd" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.891892 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3703c96e-c6c7-4742-932f-9943b276b9d2-must-gather-output\") pod \"must-gather-5tnhd\" (UID: \"3703c96e-c6c7-4742-932f-9943b276b9d2\") " pod="openshift-must-gather-566mg/must-gather-5tnhd" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.892394 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3703c96e-c6c7-4742-932f-9943b276b9d2-must-gather-output\") pod \"must-gather-5tnhd\" (UID: \"3703c96e-c6c7-4742-932f-9943b276b9d2\") " pod="openshift-must-gather-566mg/must-gather-5tnhd" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.917170 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdqhc\" (UniqueName: \"kubernetes.io/projected/3703c96e-c6c7-4742-932f-9943b276b9d2-kube-api-access-sdqhc\") pod \"must-gather-5tnhd\" (UID: \"3703c96e-c6c7-4742-932f-9943b276b9d2\") " pod="openshift-must-gather-566mg/must-gather-5tnhd" Jan 05 23:39:18 crc kubenswrapper[4910]: I0105 23:39:18.934259 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-566mg/must-gather-5tnhd" Jan 05 23:39:19 crc kubenswrapper[4910]: W0105 23:39:19.454501 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3703c96e_c6c7_4742_932f_9943b276b9d2.slice/crio-4296c791605c99d49c494edb37cc68ddd11aa067665714797e0919551700ed45 WatchSource:0}: Error finding container 4296c791605c99d49c494edb37cc68ddd11aa067665714797e0919551700ed45: Status 404 returned error can't find the container with id 4296c791605c99d49c494edb37cc68ddd11aa067665714797e0919551700ed45 Jan 05 23:39:19 crc kubenswrapper[4910]: I0105 23:39:19.459559 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-566mg/must-gather-5tnhd"] Jan 05 23:39:20 crc kubenswrapper[4910]: I0105 23:39:20.227411 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-566mg/must-gather-5tnhd" event={"ID":"3703c96e-c6c7-4742-932f-9943b276b9d2","Type":"ContainerStarted","Data":"4296c791605c99d49c494edb37cc68ddd11aa067665714797e0919551700ed45"} Jan 05 23:39:26 crc kubenswrapper[4910]: I0105 23:39:26.064464 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-sync-fdhs8"] Jan 05 23:39:26 crc kubenswrapper[4910]: I0105 23:39:26.081228 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-sync-fdhs8"] Jan 05 23:39:26 crc kubenswrapper[4910]: I0105 23:39:26.743460 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f" path="/var/lib/kubelet/pods/0a5a592c-13c1-4fe0-bb2e-b75bc57a4d1f/volumes" Jan 05 23:39:28 crc kubenswrapper[4910]: I0105 23:39:28.343283 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-566mg/must-gather-5tnhd" event={"ID":"3703c96e-c6c7-4742-932f-9943b276b9d2","Type":"ContainerStarted","Data":"25439afa7f57a4751d807be1e94cba4de39d94a1fd757b1cfa13fb42ab361431"} Jan 05 23:39:28 crc kubenswrapper[4910]: I0105 23:39:28.343831 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-566mg/must-gather-5tnhd" event={"ID":"3703c96e-c6c7-4742-932f-9943b276b9d2","Type":"ContainerStarted","Data":"20b06b0cc7eb7acb35fe07962414914ec1f233139a9836f96132f41c9cf1b72f"} Jan 05 23:39:28 crc kubenswrapper[4910]: I0105 23:39:28.371139 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-566mg/must-gather-5tnhd" podStartSLOduration=2.34607848 podStartE2EDuration="10.371105834s" podCreationTimestamp="2026-01-05 23:39:18 +0000 UTC" firstStartedPulling="2026-01-05 23:39:19.456909127 +0000 UTC m=+6491.034406787" lastFinishedPulling="2026-01-05 23:39:27.481936471 +0000 UTC m=+6499.059434141" observedRunningTime="2026-01-05 23:39:28.36464364 +0000 UTC m=+6499.942141310" watchObservedRunningTime="2026-01-05 23:39:28.371105834 +0000 UTC m=+6499.948603494" Jan 05 23:39:30 crc kubenswrapper[4910]: E0105 23:39:30.528143 4910 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.166:42046->38.102.83.166:40365: write tcp 38.102.83.166:42046->38.102.83.166:40365: write: broken pipe Jan 05 23:39:32 crc kubenswrapper[4910]: I0105 23:39:32.148360 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-566mg/crc-debug-7npm8"] Jan 05 23:39:32 crc kubenswrapper[4910]: I0105 23:39:32.151112 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-566mg/crc-debug-7npm8" Jan 05 23:39:32 crc kubenswrapper[4910]: I0105 23:39:32.336467 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/716ed5c1-02b0-4c5e-bb78-bf33d34b6164-host\") pod \"crc-debug-7npm8\" (UID: \"716ed5c1-02b0-4c5e-bb78-bf33d34b6164\") " pod="openshift-must-gather-566mg/crc-debug-7npm8" Jan 05 23:39:32 crc kubenswrapper[4910]: I0105 23:39:32.337026 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqfv6\" (UniqueName: \"kubernetes.io/projected/716ed5c1-02b0-4c5e-bb78-bf33d34b6164-kube-api-access-rqfv6\") pod \"crc-debug-7npm8\" (UID: \"716ed5c1-02b0-4c5e-bb78-bf33d34b6164\") " pod="openshift-must-gather-566mg/crc-debug-7npm8" Jan 05 23:39:32 crc kubenswrapper[4910]: I0105 23:39:32.438545 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/716ed5c1-02b0-4c5e-bb78-bf33d34b6164-host\") pod \"crc-debug-7npm8\" (UID: \"716ed5c1-02b0-4c5e-bb78-bf33d34b6164\") " pod="openshift-must-gather-566mg/crc-debug-7npm8" Jan 05 23:39:32 crc kubenswrapper[4910]: I0105 23:39:32.438717 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqfv6\" (UniqueName: \"kubernetes.io/projected/716ed5c1-02b0-4c5e-bb78-bf33d34b6164-kube-api-access-rqfv6\") pod \"crc-debug-7npm8\" (UID: \"716ed5c1-02b0-4c5e-bb78-bf33d34b6164\") " pod="openshift-must-gather-566mg/crc-debug-7npm8" Jan 05 23:39:32 crc kubenswrapper[4910]: I0105 23:39:32.438716 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/716ed5c1-02b0-4c5e-bb78-bf33d34b6164-host\") pod \"crc-debug-7npm8\" (UID: \"716ed5c1-02b0-4c5e-bb78-bf33d34b6164\") " pod="openshift-must-gather-566mg/crc-debug-7npm8" Jan 05 23:39:32 crc kubenswrapper[4910]: I0105 23:39:32.486670 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqfv6\" (UniqueName: \"kubernetes.io/projected/716ed5c1-02b0-4c5e-bb78-bf33d34b6164-kube-api-access-rqfv6\") pod \"crc-debug-7npm8\" (UID: \"716ed5c1-02b0-4c5e-bb78-bf33d34b6164\") " pod="openshift-must-gather-566mg/crc-debug-7npm8" Jan 05 23:39:32 crc kubenswrapper[4910]: I0105 23:39:32.775803 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-566mg/crc-debug-7npm8" Jan 05 23:39:33 crc kubenswrapper[4910]: I0105 23:39:33.412277 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-566mg/crc-debug-7npm8" event={"ID":"716ed5c1-02b0-4c5e-bb78-bf33d34b6164","Type":"ContainerStarted","Data":"24e2ab985e61eabf73c4127d3c02bbe2f1aa17ca4c97fd444f739f38be4626b8"} Jan 05 23:39:40 crc kubenswrapper[4910]: I0105 23:39:40.952314 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:39:40 crc kubenswrapper[4910]: I0105 23:39:40.953017 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:39:40 crc kubenswrapper[4910]: I0105 23:39:40.953075 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 23:39:40 crc kubenswrapper[4910]: I0105 23:39:40.955181 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"85efc8de819de3d45b60cf4b26ac6a5b91b06bbd1c65b576dff5063a93cada55"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 23:39:40 crc kubenswrapper[4910]: I0105 23:39:40.955261 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://85efc8de819de3d45b60cf4b26ac6a5b91b06bbd1c65b576dff5063a93cada55" gracePeriod=600 Jan 05 23:39:41 crc kubenswrapper[4910]: I0105 23:39:41.511207 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="85efc8de819de3d45b60cf4b26ac6a5b91b06bbd1c65b576dff5063a93cada55" exitCode=0 Jan 05 23:39:41 crc kubenswrapper[4910]: I0105 23:39:41.511354 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"85efc8de819de3d45b60cf4b26ac6a5b91b06bbd1c65b576dff5063a93cada55"} Jan 05 23:39:41 crc kubenswrapper[4910]: I0105 23:39:41.511488 4910 scope.go:117] "RemoveContainer" containerID="f0334e4359831541c2f90c0defd8867b10c126be0235edac4acd76d34a0ba514" Jan 05 23:39:44 crc kubenswrapper[4910]: I0105 23:39:44.541561 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8"} Jan 05 23:39:44 crc kubenswrapper[4910]: I0105 23:39:44.543226 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-566mg/crc-debug-7npm8" 
event={"ID":"716ed5c1-02b0-4c5e-bb78-bf33d34b6164","Type":"ContainerStarted","Data":"afa59add98b7b1fe389c3b460eee7d5e9c32394f1568f4ba027a8a8c25d7acea"} Jan 05 23:39:44 crc kubenswrapper[4910]: I0105 23:39:44.614011 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-566mg/crc-debug-7npm8" podStartSLOduration=1.615078772 podStartE2EDuration="12.613994384s" podCreationTimestamp="2026-01-05 23:39:32 +0000 UTC" firstStartedPulling="2026-01-05 23:39:32.842157812 +0000 UTC m=+6504.419655472" lastFinishedPulling="2026-01-05 23:39:43.841073424 +0000 UTC m=+6515.418571084" observedRunningTime="2026-01-05 23:39:44.60965696 +0000 UTC m=+6516.187154650" watchObservedRunningTime="2026-01-05 23:39:44.613994384 +0000 UTC m=+6516.191492054" Jan 05 23:39:49 crc kubenswrapper[4910]: I0105 23:39:49.424885 4910 scope.go:117] "RemoveContainer" containerID="810574ec4c93b14b089672e0b6a92f77a23905799ba106662d8606d683771008" Jan 05 23:39:51 crc kubenswrapper[4910]: I0105 23:39:51.698981 4910 scope.go:117] "RemoveContainer" containerID="fb984cee4982f54b303e7582ed586d6a948cfe4da81d6f22e3a8e049b5dd583b" Jan 05 23:39:51 crc kubenswrapper[4910]: I0105 23:39:51.779146 4910 scope.go:117] "RemoveContainer" containerID="3b774d1715ddc3b1baa2080e164b94526f5e6509f680439a5170e186f92bf7e5" Jan 05 23:39:51 crc kubenswrapper[4910]: I0105 23:39:51.817421 4910 scope.go:117] "RemoveContainer" containerID="042a784e7eaa163c9f4f4f613360a1c8267f39dcc4fd02190dccf1399d2a6b25" Jan 05 23:40:00 crc kubenswrapper[4910]: I0105 23:40:00.713240 4910 generic.go:334] "Generic (PLEG): container finished" podID="716ed5c1-02b0-4c5e-bb78-bf33d34b6164" containerID="afa59add98b7b1fe389c3b460eee7d5e9c32394f1568f4ba027a8a8c25d7acea" exitCode=0 Jan 05 23:40:00 crc kubenswrapper[4910]: I0105 23:40:00.713361 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-566mg/crc-debug-7npm8" event={"ID":"716ed5c1-02b0-4c5e-bb78-bf33d34b6164","Type":"ContainerDied","Data":"afa59add98b7b1fe389c3b460eee7d5e9c32394f1568f4ba027a8a8c25d7acea"} Jan 05 23:40:01 crc kubenswrapper[4910]: I0105 23:40:01.849593 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-566mg/crc-debug-7npm8" Jan 05 23:40:01 crc kubenswrapper[4910]: I0105 23:40:01.889924 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-566mg/crc-debug-7npm8"] Jan 05 23:40:01 crc kubenswrapper[4910]: I0105 23:40:01.902249 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-566mg/crc-debug-7npm8"] Jan 05 23:40:01 crc kubenswrapper[4910]: I0105 23:40:01.909946 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqfv6\" (UniqueName: \"kubernetes.io/projected/716ed5c1-02b0-4c5e-bb78-bf33d34b6164-kube-api-access-rqfv6\") pod \"716ed5c1-02b0-4c5e-bb78-bf33d34b6164\" (UID: \"716ed5c1-02b0-4c5e-bb78-bf33d34b6164\") " Jan 05 23:40:01 crc kubenswrapper[4910]: I0105 23:40:01.911583 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/716ed5c1-02b0-4c5e-bb78-bf33d34b6164-host\") pod \"716ed5c1-02b0-4c5e-bb78-bf33d34b6164\" (UID: \"716ed5c1-02b0-4c5e-bb78-bf33d34b6164\") " Jan 05 23:40:01 crc kubenswrapper[4910]: I0105 23:40:01.911686 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/716ed5c1-02b0-4c5e-bb78-bf33d34b6164-host" (OuterVolumeSpecName: "host") pod "716ed5c1-02b0-4c5e-bb78-bf33d34b6164" (UID: "716ed5c1-02b0-4c5e-bb78-bf33d34b6164"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 23:40:01 crc kubenswrapper[4910]: I0105 23:40:01.912580 4910 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/716ed5c1-02b0-4c5e-bb78-bf33d34b6164-host\") on node \"crc\" DevicePath \"\"" Jan 05 23:40:01 crc kubenswrapper[4910]: I0105 23:40:01.915420 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/716ed5c1-02b0-4c5e-bb78-bf33d34b6164-kube-api-access-rqfv6" (OuterVolumeSpecName: "kube-api-access-rqfv6") pod "716ed5c1-02b0-4c5e-bb78-bf33d34b6164" (UID: "716ed5c1-02b0-4c5e-bb78-bf33d34b6164"). InnerVolumeSpecName "kube-api-access-rqfv6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:40:02 crc kubenswrapper[4910]: I0105 23:40:02.014987 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqfv6\" (UniqueName: \"kubernetes.io/projected/716ed5c1-02b0-4c5e-bb78-bf33d34b6164-kube-api-access-rqfv6\") on node \"crc\" DevicePath \"\"" Jan 05 23:40:02 crc kubenswrapper[4910]: I0105 23:40:02.736158 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="716ed5c1-02b0-4c5e-bb78-bf33d34b6164" path="/var/lib/kubelet/pods/716ed5c1-02b0-4c5e-bb78-bf33d34b6164/volumes" Jan 05 23:40:02 crc kubenswrapper[4910]: I0105 23:40:02.750530 4910 scope.go:117] "RemoveContainer" containerID="afa59add98b7b1fe389c3b460eee7d5e9c32394f1568f4ba027a8a8c25d7acea" Jan 05 23:40:02 crc kubenswrapper[4910]: I0105 23:40:02.750672 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-566mg/crc-debug-7npm8" Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.086819 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-566mg/crc-debug-f2hhl"] Jan 05 23:40:03 crc kubenswrapper[4910]: E0105 23:40:03.088479 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="716ed5c1-02b0-4c5e-bb78-bf33d34b6164" containerName="container-00" Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.088494 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="716ed5c1-02b0-4c5e-bb78-bf33d34b6164" containerName="container-00" Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.089026 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="716ed5c1-02b0-4c5e-bb78-bf33d34b6164" containerName="container-00" Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.089841 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-566mg/crc-debug-f2hhl" Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.141713 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6q2c\" (UniqueName: \"kubernetes.io/projected/c3627552-2637-4500-a757-472ad05ae4a3-kube-api-access-l6q2c\") pod \"crc-debug-f2hhl\" (UID: \"c3627552-2637-4500-a757-472ad05ae4a3\") " pod="openshift-must-gather-566mg/crc-debug-f2hhl" Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.141771 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3627552-2637-4500-a757-472ad05ae4a3-host\") pod \"crc-debug-f2hhl\" (UID: \"c3627552-2637-4500-a757-472ad05ae4a3\") " pod="openshift-must-gather-566mg/crc-debug-f2hhl" Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.243800 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6q2c\" (UniqueName: \"kubernetes.io/projected/c3627552-2637-4500-a757-472ad05ae4a3-kube-api-access-l6q2c\") pod \"crc-debug-f2hhl\" (UID: \"c3627552-2637-4500-a757-472ad05ae4a3\") " pod="openshift-must-gather-566mg/crc-debug-f2hhl" Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.243855 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3627552-2637-4500-a757-472ad05ae4a3-host\") pod \"crc-debug-f2hhl\" (UID: \"c3627552-2637-4500-a757-472ad05ae4a3\") " pod="openshift-must-gather-566mg/crc-debug-f2hhl" Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.244011 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3627552-2637-4500-a757-472ad05ae4a3-host\") pod \"crc-debug-f2hhl\" (UID: \"c3627552-2637-4500-a757-472ad05ae4a3\") " pod="openshift-must-gather-566mg/crc-debug-f2hhl" Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.289924 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6q2c\" (UniqueName: \"kubernetes.io/projected/c3627552-2637-4500-a757-472ad05ae4a3-kube-api-access-l6q2c\") pod \"crc-debug-f2hhl\" (UID: \"c3627552-2637-4500-a757-472ad05ae4a3\") " pod="openshift-must-gather-566mg/crc-debug-f2hhl" Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.416494 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-566mg/crc-debug-f2hhl" Jan 05 23:40:03 crc kubenswrapper[4910]: W0105 23:40:03.499638 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc3627552_2637_4500_a757_472ad05ae4a3.slice/crio-01f3d95d3a7f671db0d56d2c04778c17ecf2245c8db5e26eaecbfef1790e0be6 WatchSource:0}: Error finding container 01f3d95d3a7f671db0d56d2c04778c17ecf2245c8db5e26eaecbfef1790e0be6: Status 404 returned error can't find the container with id 01f3d95d3a7f671db0d56d2c04778c17ecf2245c8db5e26eaecbfef1790e0be6 Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.766324 4910 generic.go:334] "Generic (PLEG): container finished" podID="c3627552-2637-4500-a757-472ad05ae4a3" containerID="1adfbae850f22bb19ec011af93ed48543a270a25762457bfa9b23d232e15cb29" exitCode=1 Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.766426 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-566mg/crc-debug-f2hhl" event={"ID":"c3627552-2637-4500-a757-472ad05ae4a3","Type":"ContainerDied","Data":"1adfbae850f22bb19ec011af93ed48543a270a25762457bfa9b23d232e15cb29"} Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.766695 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-566mg/crc-debug-f2hhl" event={"ID":"c3627552-2637-4500-a757-472ad05ae4a3","Type":"ContainerStarted","Data":"01f3d95d3a7f671db0d56d2c04778c17ecf2245c8db5e26eaecbfef1790e0be6"} Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.814170 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-566mg/crc-debug-f2hhl"] Jan 05 23:40:03 crc kubenswrapper[4910]: I0105 23:40:03.828297 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-566mg/crc-debug-f2hhl"] Jan 05 23:40:04 crc kubenswrapper[4910]: I0105 23:40:04.903603 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-566mg/crc-debug-f2hhl" Jan 05 23:40:04 crc kubenswrapper[4910]: I0105 23:40:04.984315 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6q2c\" (UniqueName: \"kubernetes.io/projected/c3627552-2637-4500-a757-472ad05ae4a3-kube-api-access-l6q2c\") pod \"c3627552-2637-4500-a757-472ad05ae4a3\" (UID: \"c3627552-2637-4500-a757-472ad05ae4a3\") " Jan 05 23:40:04 crc kubenswrapper[4910]: I0105 23:40:04.984388 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3627552-2637-4500-a757-472ad05ae4a3-host\") pod \"c3627552-2637-4500-a757-472ad05ae4a3\" (UID: \"c3627552-2637-4500-a757-472ad05ae4a3\") " Jan 05 23:40:04 crc kubenswrapper[4910]: I0105 23:40:04.984553 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c3627552-2637-4500-a757-472ad05ae4a3-host" (OuterVolumeSpecName: "host") pod "c3627552-2637-4500-a757-472ad05ae4a3" (UID: "c3627552-2637-4500-a757-472ad05ae4a3"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 05 23:40:04 crc kubenswrapper[4910]: I0105 23:40:04.985767 4910 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c3627552-2637-4500-a757-472ad05ae4a3-host\") on node \"crc\" DevicePath \"\"" Jan 05 23:40:04 crc kubenswrapper[4910]: I0105 23:40:04.992161 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3627552-2637-4500-a757-472ad05ae4a3-kube-api-access-l6q2c" (OuterVolumeSpecName: "kube-api-access-l6q2c") pod "c3627552-2637-4500-a757-472ad05ae4a3" (UID: "c3627552-2637-4500-a757-472ad05ae4a3"). InnerVolumeSpecName "kube-api-access-l6q2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:40:05 crc kubenswrapper[4910]: I0105 23:40:05.088215 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6q2c\" (UniqueName: \"kubernetes.io/projected/c3627552-2637-4500-a757-472ad05ae4a3-kube-api-access-l6q2c\") on node \"crc\" DevicePath \"\"" Jan 05 23:40:05 crc kubenswrapper[4910]: I0105 23:40:05.796940 4910 scope.go:117] "RemoveContainer" containerID="1adfbae850f22bb19ec011af93ed48543a270a25762457bfa9b23d232e15cb29" Jan 05 23:40:05 crc kubenswrapper[4910]: I0105 23:40:05.797091 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-566mg/crc-debug-f2hhl" Jan 05 23:40:06 crc kubenswrapper[4910]: I0105 23:40:06.743160 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3627552-2637-4500-a757-472ad05ae4a3" path="/var/lib/kubelet/pods/c3627552-2637-4500-a757-472ad05ae4a3/volumes" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.634532 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2mvns"] Jan 05 23:40:54 crc kubenswrapper[4910]: E0105 23:40:54.636522 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3627552-2637-4500-a757-472ad05ae4a3" containerName="container-00" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.636543 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3627552-2637-4500-a757-472ad05ae4a3" containerName="container-00" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.636816 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3627552-2637-4500-a757-472ad05ae4a3" containerName="container-00" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.638894 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.650341 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2mvns"] Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.736318 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c768ac3-99a9-414c-8b4a-cd3da396bae8-utilities\") pod \"certified-operators-2mvns\" (UID: \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\") " pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.736470 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c768ac3-99a9-414c-8b4a-cd3da396bae8-catalog-content\") pod \"certified-operators-2mvns\" (UID: \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\") " pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.736625 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vq2f\" (UniqueName: \"kubernetes.io/projected/1c768ac3-99a9-414c-8b4a-cd3da396bae8-kube-api-access-9vq2f\") pod \"certified-operators-2mvns\" (UID: \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\") " pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.838105 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vq2f\" (UniqueName: \"kubernetes.io/projected/1c768ac3-99a9-414c-8b4a-cd3da396bae8-kube-api-access-9vq2f\") pod \"certified-operators-2mvns\" (UID: \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\") " pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.838173 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c768ac3-99a9-414c-8b4a-cd3da396bae8-utilities\") pod \"certified-operators-2mvns\" (UID: \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\") " pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.838695 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c768ac3-99a9-414c-8b4a-cd3da396bae8-catalog-content\") pod \"certified-operators-2mvns\" (UID: \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\") " pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.838823 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c768ac3-99a9-414c-8b4a-cd3da396bae8-utilities\") pod \"certified-operators-2mvns\" (UID: \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\") " pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.839171 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c768ac3-99a9-414c-8b4a-cd3da396bae8-catalog-content\") pod \"certified-operators-2mvns\" (UID: \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\") " pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.857250 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9vq2f\" (UniqueName: \"kubernetes.io/projected/1c768ac3-99a9-414c-8b4a-cd3da396bae8-kube-api-access-9vq2f\") pod \"certified-operators-2mvns\" (UID: \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\") " pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:40:54 crc kubenswrapper[4910]: I0105 23:40:54.998384 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:40:55 crc kubenswrapper[4910]: I0105 23:40:55.492482 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2mvns"] Jan 05 23:40:56 crc kubenswrapper[4910]: I0105 23:40:56.379372 4910 generic.go:334] "Generic (PLEG): container finished" podID="1c768ac3-99a9-414c-8b4a-cd3da396bae8" containerID="976b5431749cf859486d2c685e3dd975cd571b59d8a353242fa95597deaf6bde" exitCode=0 Jan 05 23:40:56 crc kubenswrapper[4910]: I0105 23:40:56.379471 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mvns" event={"ID":"1c768ac3-99a9-414c-8b4a-cd3da396bae8","Type":"ContainerDied","Data":"976b5431749cf859486d2c685e3dd975cd571b59d8a353242fa95597deaf6bde"} Jan 05 23:40:56 crc kubenswrapper[4910]: I0105 23:40:56.379640 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mvns" event={"ID":"1c768ac3-99a9-414c-8b4a-cd3da396bae8","Type":"ContainerStarted","Data":"d031a522b43a5aecdb11e6702ee9d6aad345ccaa576e8fd0185310ab7302cfd2"} Jan 05 23:40:57 crc kubenswrapper[4910]: I0105 23:40:57.392233 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mvns" event={"ID":"1c768ac3-99a9-414c-8b4a-cd3da396bae8","Type":"ContainerStarted","Data":"8a02c9fae5050a5116a8146bcb001d5c615ba3e9637d3490481c496e871db942"} Jan 05 23:40:58 crc kubenswrapper[4910]: I0105 23:40:58.404822 4910 generic.go:334] "Generic (PLEG): container finished" podID="1c768ac3-99a9-414c-8b4a-cd3da396bae8" containerID="8a02c9fae5050a5116a8146bcb001d5c615ba3e9637d3490481c496e871db942" exitCode=0 Jan 05 23:40:58 crc kubenswrapper[4910]: I0105 23:40:58.405003 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mvns" event={"ID":"1c768ac3-99a9-414c-8b4a-cd3da396bae8","Type":"ContainerDied","Data":"8a02c9fae5050a5116a8146bcb001d5c615ba3e9637d3490481c496e871db942"} Jan 05 23:40:59 crc kubenswrapper[4910]: I0105 23:40:59.417681 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mvns" event={"ID":"1c768ac3-99a9-414c-8b4a-cd3da396bae8","Type":"ContainerStarted","Data":"a7a994c767b4b21e901e8d180d2525e33ad803565727fa44cc1a5b8651ca8dea"} Jan 05 23:40:59 crc kubenswrapper[4910]: I0105 23:40:59.469252 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2mvns" podStartSLOduration=2.832759941 podStartE2EDuration="5.46922224s" podCreationTimestamp="2026-01-05 23:40:54 +0000 UTC" firstStartedPulling="2026-01-05 23:40:56.382051609 +0000 UTC m=+6587.959549309" lastFinishedPulling="2026-01-05 23:40:59.018513938 +0000 UTC m=+6590.596011608" observedRunningTime="2026-01-05 23:40:59.457590083 +0000 UTC m=+6591.035087763" watchObservedRunningTime="2026-01-05 23:40:59.46922224 +0000 UTC m=+6591.046719910" Jan 05 23:41:02 crc kubenswrapper[4910]: I0105 23:41:02.228202 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_alertmanager-metric-storage-0_b23e2877-fd92-437d-91c4-97e0391e9355/init-config-reloader/0.log" Jan 05 23:41:02 crc kubenswrapper[4910]: I0105 23:41:02.624105 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_b23e2877-fd92-437d-91c4-97e0391e9355/init-config-reloader/0.log" Jan 05 23:41:02 crc kubenswrapper[4910]: I0105 23:41:02.627259 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_b23e2877-fd92-437d-91c4-97e0391e9355/alertmanager/0.log" Jan 05 23:41:02 crc kubenswrapper[4910]: I0105 23:41:02.662474 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_b23e2877-fd92-437d-91c4-97e0391e9355/config-reloader/0.log" Jan 05 23:41:02 crc kubenswrapper[4910]: I0105 23:41:02.836372 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_6de8dea8-3e8e-4b14-8b24-51ce2a7952b2/aodh-evaluator/0.log" Jan 05 23:41:02 crc kubenswrapper[4910]: I0105 23:41:02.858560 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_6de8dea8-3e8e-4b14-8b24-51ce2a7952b2/aodh-api/0.log" Jan 05 23:41:03 crc kubenswrapper[4910]: I0105 23:41:03.035957 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_6de8dea8-3e8e-4b14-8b24-51ce2a7952b2/aodh-listener/0.log" Jan 05 23:41:03 crc kubenswrapper[4910]: I0105 23:41:03.064046 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_6de8dea8-3e8e-4b14-8b24-51ce2a7952b2/aodh-notifier/0.log" Jan 05 23:41:03 crc kubenswrapper[4910]: I0105 23:41:03.142176 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-5735-account-create-update-h89vj_36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8/mariadb-account-create-update/0.log" Jan 05 23:41:03 crc kubenswrapper[4910]: I0105 23:41:03.319109 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-db-create-f99t2_79b2932f-d96b-45db-bea6-e821af5a8388/mariadb-database-create/0.log" Jan 05 23:41:03 crc kubenswrapper[4910]: I0105 23:41:03.433380 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-db-sync-2w8sc_b2ff657b-9e47-472a-9ff1-eda124dd4db8/aodh-db-sync/0.log" Jan 05 23:41:03 crc kubenswrapper[4910]: I0105 23:41:03.576653 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6cdf5d85b6-8zbr5_af4ff390-1ab3-42d8-be1f-126a38d4b313/barbican-api/0.log" Jan 05 23:41:03 crc kubenswrapper[4910]: I0105 23:41:03.670471 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6cdf5d85b6-8zbr5_af4ff390-1ab3-42d8-be1f-126a38d4b313/barbican-api-log/0.log" Jan 05 23:41:03 crc kubenswrapper[4910]: I0105 23:41:03.757033 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-67d484f698-cn4m6_97fb16df-a478-4676-bab1-3eb2033abed6/barbican-keystone-listener/0.log" Jan 05 23:41:03 crc kubenswrapper[4910]: I0105 23:41:03.833487 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-67d484f698-cn4m6_97fb16df-a478-4676-bab1-3eb2033abed6/barbican-keystone-listener-log/0.log" Jan 05 23:41:03 crc kubenswrapper[4910]: I0105 23:41:03.959298 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-574b95949c-7xl5m_753baa24-890a-44bb-9e90-78afd9665bfa/barbican-worker/0.log" Jan 05 23:41:03 crc kubenswrapper[4910]: I0105 23:41:03.995390 
4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-574b95949c-7xl5m_753baa24-890a-44bb-9e90-78afd9665bfa/barbican-worker-log/0.log" Jan 05 23:41:04 crc kubenswrapper[4910]: I0105 23:41:04.171781 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_dcced35e-d7c2-4f85-9e90-16ab520684b3/ceilometer-central-agent/0.log" Jan 05 23:41:04 crc kubenswrapper[4910]: I0105 23:41:04.240060 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_dcced35e-d7c2-4f85-9e90-16ab520684b3/ceilometer-notification-agent/0.log" Jan 05 23:41:04 crc kubenswrapper[4910]: I0105 23:41:04.274707 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_dcced35e-d7c2-4f85-9e90-16ab520684b3/proxy-httpd/0.log" Jan 05 23:41:04 crc kubenswrapper[4910]: I0105 23:41:04.354414 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_dcced35e-d7c2-4f85-9e90-16ab520684b3/sg-core/0.log" Jan 05 23:41:04 crc kubenswrapper[4910]: I0105 23:41:04.434748 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_18130341-5ea1-4803-9b04-4d8ccb122828/cinder-api/0.log" Jan 05 23:41:04 crc kubenswrapper[4910]: I0105 23:41:04.497608 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_18130341-5ea1-4803-9b04-4d8ccb122828/cinder-api-log/0.log" Jan 05 23:41:04 crc kubenswrapper[4910]: I0105 23:41:04.688634 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_4cb60132-8c1f-4d0f-9582-32e551e2f4f9/probe/0.log" Jan 05 23:41:04 crc kubenswrapper[4910]: I0105 23:41:04.911739 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_4cb60132-8c1f-4d0f-9582-32e551e2f4f9/cinder-backup/0.log" Jan 05 23:41:04 crc kubenswrapper[4910]: I0105 23:41:04.927732 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_07d1adea-bd0b-4e0a-a673-b20a56d68a20/cinder-scheduler/0.log" Jan 05 23:41:04 crc kubenswrapper[4910]: I0105 23:41:04.978348 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_07d1adea-bd0b-4e0a-a673-b20a56d68a20/probe/0.log" Jan 05 23:41:04 crc kubenswrapper[4910]: I0105 23:41:04.998567 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:41:04 crc kubenswrapper[4910]: I0105 23:41:04.999592 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:41:05 crc kubenswrapper[4910]: I0105 23:41:05.055670 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:41:05 crc kubenswrapper[4910]: I0105 23:41:05.391449 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe/cinder-volume/0.log" Jan 05 23:41:05 crc kubenswrapper[4910]: I0105 23:41:05.417687 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_9c5acd7b-7c52-4ff4-b9f4-1f80bb1e4fbe/probe/0.log" Jan 05 23:41:05 crc kubenswrapper[4910]: I0105 23:41:05.556213 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:41:05 crc kubenswrapper[4910]: I0105 23:41:05.581796 4910 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-579d455669-7z4vm_0a3c74f1-a391-4472-9ff6-8d72a85f41d5/init/0.log" Jan 05 23:41:05 crc kubenswrapper[4910]: I0105 23:41:05.611756 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2mvns"] Jan 05 23:41:05 crc kubenswrapper[4910]: I0105 23:41:05.849167 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-579d455669-7z4vm_0a3c74f1-a391-4472-9ff6-8d72a85f41d5/dnsmasq-dns/0.log" Jan 05 23:41:05 crc kubenswrapper[4910]: I0105 23:41:05.861342 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-579d455669-7z4vm_0a3c74f1-a391-4472-9ff6-8d72a85f41d5/init/0.log" Jan 05 23:41:05 crc kubenswrapper[4910]: I0105 23:41:05.870035 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_47fd1e78-1916-411b-9841-a503c9fdc455/glance-httpd/0.log" Jan 05 23:41:06 crc kubenswrapper[4910]: I0105 23:41:06.151805 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_47fd1e78-1916-411b-9841-a503c9fdc455/glance-log/0.log" Jan 05 23:41:06 crc kubenswrapper[4910]: I0105 23:41:06.168298 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_47396791-d60d-4902-b7b7-7c798ac6136f/glance-log/0.log" Jan 05 23:41:06 crc kubenswrapper[4910]: I0105 23:41:06.234007 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_47396791-d60d-4902-b7b7-7c798ac6136f/glance-httpd/0.log" Jan 05 23:41:06 crc kubenswrapper[4910]: I0105 23:41:06.399031 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-2888-account-create-update-5gtwz_ac8e2000-282c-4602-b740-b834d9d58e0f/mariadb-account-create-update/0.log" Jan 05 23:41:06 crc kubenswrapper[4910]: I0105 23:41:06.493801 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-56ffd66999-8rtfw_6b6cce0e-2a92-49dd-8c47-0e453905b9ea/heat-api/0.log" Jan 05 23:41:06 crc kubenswrapper[4910]: I0105 23:41:06.669917 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-84c9f574d7-j7rfz_4af61181-253f-4ea6-b73c-d0853de6552b/heat-cfnapi/0.log" Jan 05 23:41:06 crc kubenswrapper[4910]: I0105 23:41:06.760381 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-db-create-tvm6d_11faec61-a084-4bfb-b9b4-06fe57b34754/mariadb-database-create/0.log" Jan 05 23:41:06 crc kubenswrapper[4910]: I0105 23:41:06.815748 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-db-sync-bxg5l_72257aab-c18e-432e-a662-73955418e381/heat-db-sync/0.log" Jan 05 23:41:06 crc kubenswrapper[4910]: I0105 23:41:06.951761 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-57bb7c69d4-76xm4_70c09a40-83e9-4f29-8718-26e434fd2935/heat-engine/0.log" Jan 05 23:41:07 crc kubenswrapper[4910]: I0105 23:41:07.061419 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-68557c6fd9-rbl2l_8f15c3c0-9df1-4999-80e0-d3ebd88a76a8/horizon-log/0.log" Jan 05 23:41:07 crc kubenswrapper[4910]: I0105 23:41:07.068520 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-68557c6fd9-rbl2l_8f15c3c0-9df1-4999-80e0-d3ebd88a76a8/horizon/0.log" Jan 05 23:41:07 crc kubenswrapper[4910]: I0105 23:41:07.254384 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_keystone-867597f569-css8k_a890ac8e-7d88-463b-90e8-36f55b2c3b6c/keystone-api/0.log" Jan 05 23:41:07 crc kubenswrapper[4910]: I0105 23:41:07.299463 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_84e5ccdc-97e4-4be2-ad7d-ff34058e10c5/kube-state-metrics/0.log" Jan 05 23:41:07 crc kubenswrapper[4910]: I0105 23:41:07.486836 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-1129-account-create-update-qbxlb_b2c2edf2-0678-437b-aa0a-1b5448266d93/mariadb-account-create-update/0.log" Jan 05 23:41:07 crc kubenswrapper[4910]: I0105 23:41:07.519607 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2mvns" podUID="1c768ac3-99a9-414c-8b4a-cd3da396bae8" containerName="registry-server" containerID="cri-o://a7a994c767b4b21e901e8d180d2525e33ad803565727fa44cc1a5b8651ca8dea" gracePeriod=2 Jan 05 23:41:07 crc kubenswrapper[4910]: I0105 23:41:07.593033 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c/manila-api-log/0.log" Jan 05 23:41:07 crc kubenswrapper[4910]: I0105 23:41:07.651556 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_ab13a3ad-f39a-4ef5-8861-e59c1c7bb17c/manila-api/0.log" Jan 05 23:41:07 crc kubenswrapper[4910]: I0105 23:41:07.685547 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-db-create-n4xcf_93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1/mariadb-database-create/0.log" Jan 05 23:41:07 crc kubenswrapper[4910]: I0105 23:41:07.930435 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-db-sync-dcfxt_1e41b44c-e6c9-473f-870e-52fc55ef73ff/manila-db-sync/0.log" Jan 05 23:41:07 crc kubenswrapper[4910]: I0105 23:41:07.965963 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_16a933d3-e82f-4dc3-bc84-d961be9aacb2/manila-scheduler/0.log" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.030794 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.086786 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_16a933d3-e82f-4dc3-bc84-d961be9aacb2/probe/0.log" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.183208 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vq2f\" (UniqueName: \"kubernetes.io/projected/1c768ac3-99a9-414c-8b4a-cd3da396bae8-kube-api-access-9vq2f\") pod \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\" (UID: \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\") " Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.183325 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c768ac3-99a9-414c-8b4a-cd3da396bae8-catalog-content\") pod \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\" (UID: \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\") " Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.183389 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c768ac3-99a9-414c-8b4a-cd3da396bae8-utilities\") pod \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\" (UID: \"1c768ac3-99a9-414c-8b4a-cd3da396bae8\") " Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.184449 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c768ac3-99a9-414c-8b4a-cd3da396bae8-utilities" (OuterVolumeSpecName: "utilities") pod "1c768ac3-99a9-414c-8b4a-cd3da396bae8" (UID: "1c768ac3-99a9-414c-8b4a-cd3da396bae8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.197695 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c768ac3-99a9-414c-8b4a-cd3da396bae8-kube-api-access-9vq2f" (OuterVolumeSpecName: "kube-api-access-9vq2f") pod "1c768ac3-99a9-414c-8b4a-cd3da396bae8" (UID: "1c768ac3-99a9-414c-8b4a-cd3da396bae8"). InnerVolumeSpecName "kube-api-access-9vq2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.250698 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c768ac3-99a9-414c-8b4a-cd3da396bae8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1c768ac3-99a9-414c-8b4a-cd3da396bae8" (UID: "1c768ac3-99a9-414c-8b4a-cd3da396bae8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.286187 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c768ac3-99a9-414c-8b4a-cd3da396bae8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.286681 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c768ac3-99a9-414c-8b4a-cd3da396bae8-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.286695 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vq2f\" (UniqueName: \"kubernetes.io/projected/1c768ac3-99a9-414c-8b4a-cd3da396bae8-kube-api-access-9vq2f\") on node \"crc\" DevicePath \"\"" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.328956 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_140e458d-aa1e-4579-a47e-1b306153da30/probe/0.log" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.347469 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_140e458d-aa1e-4579-a47e-1b306153da30/manila-share/0.log" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.375267 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-copy-data_bfa6bb42-5c0e-4ef6-9378-d52fa1fbfb8c/adoption/0.log" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.530100 4910 generic.go:334] "Generic (PLEG): container finished" podID="1c768ac3-99a9-414c-8b4a-cd3da396bae8" containerID="a7a994c767b4b21e901e8d180d2525e33ad803565727fa44cc1a5b8651ca8dea" exitCode=0 Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.530165 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mvns" event={"ID":"1c768ac3-99a9-414c-8b4a-cd3da396bae8","Type":"ContainerDied","Data":"a7a994c767b4b21e901e8d180d2525e33ad803565727fa44cc1a5b8651ca8dea"} Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.530193 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2mvns" event={"ID":"1c768ac3-99a9-414c-8b4a-cd3da396bae8","Type":"ContainerDied","Data":"d031a522b43a5aecdb11e6702ee9d6aad345ccaa576e8fd0185310ab7302cfd2"} Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.530210 4910 scope.go:117] "RemoveContainer" containerID="a7a994c767b4b21e901e8d180d2525e33ad803565727fa44cc1a5b8651ca8dea" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.530228 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2mvns" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.577335 4910 scope.go:117] "RemoveContainer" containerID="8a02c9fae5050a5116a8146bcb001d5c615ba3e9637d3490481c496e871db942" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.624213 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2mvns"] Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.649349 4910 scope.go:117] "RemoveContainer" containerID="976b5431749cf859486d2c685e3dd975cd571b59d8a353242fa95597deaf6bde" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.643103 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2mvns"] Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.693355 4910 scope.go:117] "RemoveContainer" containerID="a7a994c767b4b21e901e8d180d2525e33ad803565727fa44cc1a5b8651ca8dea" Jan 05 23:41:08 crc kubenswrapper[4910]: E0105 23:41:08.694896 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7a994c767b4b21e901e8d180d2525e33ad803565727fa44cc1a5b8651ca8dea\": container with ID starting with a7a994c767b4b21e901e8d180d2525e33ad803565727fa44cc1a5b8651ca8dea not found: ID does not exist" containerID="a7a994c767b4b21e901e8d180d2525e33ad803565727fa44cc1a5b8651ca8dea" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.694927 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7a994c767b4b21e901e8d180d2525e33ad803565727fa44cc1a5b8651ca8dea"} err="failed to get container status \"a7a994c767b4b21e901e8d180d2525e33ad803565727fa44cc1a5b8651ca8dea\": rpc error: code = NotFound desc = could not find container \"a7a994c767b4b21e901e8d180d2525e33ad803565727fa44cc1a5b8651ca8dea\": container with ID starting with a7a994c767b4b21e901e8d180d2525e33ad803565727fa44cc1a5b8651ca8dea not found: ID does not exist" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.694951 4910 scope.go:117] "RemoveContainer" containerID="8a02c9fae5050a5116a8146bcb001d5c615ba3e9637d3490481c496e871db942" Jan 05 23:41:08 crc kubenswrapper[4910]: E0105 23:41:08.700190 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a02c9fae5050a5116a8146bcb001d5c615ba3e9637d3490481c496e871db942\": container with ID starting with 8a02c9fae5050a5116a8146bcb001d5c615ba3e9637d3490481c496e871db942 not found: ID does not exist" containerID="8a02c9fae5050a5116a8146bcb001d5c615ba3e9637d3490481c496e871db942" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.700222 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a02c9fae5050a5116a8146bcb001d5c615ba3e9637d3490481c496e871db942"} err="failed to get container status \"8a02c9fae5050a5116a8146bcb001d5c615ba3e9637d3490481c496e871db942\": rpc error: code = NotFound desc = could not find container \"8a02c9fae5050a5116a8146bcb001d5c615ba3e9637d3490481c496e871db942\": container with ID starting with 8a02c9fae5050a5116a8146bcb001d5c615ba3e9637d3490481c496e871db942 not found: ID does not exist" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.700240 4910 scope.go:117] "RemoveContainer" containerID="976b5431749cf859486d2c685e3dd975cd571b59d8a353242fa95597deaf6bde" Jan 05 23:41:08 crc kubenswrapper[4910]: E0105 23:41:08.705191 4910 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"976b5431749cf859486d2c685e3dd975cd571b59d8a353242fa95597deaf6bde\": container with ID starting with 976b5431749cf859486d2c685e3dd975cd571b59d8a353242fa95597deaf6bde not found: ID does not exist" containerID="976b5431749cf859486d2c685e3dd975cd571b59d8a353242fa95597deaf6bde" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.705216 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"976b5431749cf859486d2c685e3dd975cd571b59d8a353242fa95597deaf6bde"} err="failed to get container status \"976b5431749cf859486d2c685e3dd975cd571b59d8a353242fa95597deaf6bde\": rpc error: code = NotFound desc = could not find container \"976b5431749cf859486d2c685e3dd975cd571b59d8a353242fa95597deaf6bde\": container with ID starting with 976b5431749cf859486d2c685e3dd975cd571b59d8a353242fa95597deaf6bde not found: ID does not exist" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.741013 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c768ac3-99a9-414c-8b4a-cd3da396bae8" path="/var/lib/kubelet/pods/1c768ac3-99a9-414c-8b4a-cd3da396bae8/volumes" Jan 05 23:41:08 crc kubenswrapper[4910]: I0105 23:41:08.879376 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6bc969c685-87gv2_d6bd3395-e6e2-441d-8181-122d421e5947/neutron-api/0.log" Jan 05 23:41:09 crc kubenswrapper[4910]: I0105 23:41:09.019828 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6bc969c685-87gv2_d6bd3395-e6e2-441d-8181-122d421e5947/neutron-httpd/0.log" Jan 05 23:41:09 crc kubenswrapper[4910]: I0105 23:41:09.299848 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3/nova-api-api/0.log" Jan 05 23:41:09 crc kubenswrapper[4910]: I0105 23:41:09.346668 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_b4ff4c84-7f6f-4bf8-a602-bbe609bf14c3/nova-api-log/0.log" Jan 05 23:41:09 crc kubenswrapper[4910]: I0105 23:41:09.661437 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_5d354431-26ed-479a-baa3-bd24ab0abc2a/nova-cell0-conductor-conductor/0.log" Jan 05 23:41:09 crc kubenswrapper[4910]: I0105 23:41:09.694277 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_25abe292-c3e5-41cf-956e-c69b86c10ba1/nova-cell1-conductor-conductor/0.log" Jan 05 23:41:10 crc kubenswrapper[4910]: I0105 23:41:10.003846 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_cd2984f0-6e89-4170-a405-553944d7aad2/nova-cell1-novncproxy-novncproxy/0.log" Jan 05 23:41:10 crc kubenswrapper[4910]: I0105 23:41:10.105448 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d6f7b35f-ff01-4fa9-b914-7735a6bf716e/nova-metadata-log/0.log" Jan 05 23:41:10 crc kubenswrapper[4910]: I0105 23:41:10.198380 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d6f7b35f-ff01-4fa9-b914-7735a6bf716e/nova-metadata-metadata/0.log" Jan 05 23:41:10 crc kubenswrapper[4910]: I0105 23:41:10.340594 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_daf56c5f-975d-4fd6-bf96-852ddc0b476b/nova-scheduler-scheduler/0.log" Jan 05 23:41:10 crc kubenswrapper[4910]: I0105 23:41:10.513186 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_octavia-api-584c6dd788-6g8mm_e8831b55-8e5e-4003-ac04-31b3af151959/init/0.log" Jan 05 23:41:10 crc kubenswrapper[4910]: I0105 23:41:10.717568 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-584c6dd788-6g8mm_e8831b55-8e5e-4003-ac04-31b3af151959/init/0.log" Jan 05 23:41:10 crc kubenswrapper[4910]: I0105 23:41:10.756088 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-584c6dd788-6g8mm_e8831b55-8e5e-4003-ac04-31b3af151959/octavia-api-provider-agent/0.log" Jan 05 23:41:10 crc kubenswrapper[4910]: I0105 23:41:10.929967 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-vd7kz_1262f577-e64a-4760-818a-961ba274fc65/init/0.log" Jan 05 23:41:10 crc kubenswrapper[4910]: I0105 23:41:10.935420 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-584c6dd788-6g8mm_e8831b55-8e5e-4003-ac04-31b3af151959/octavia-api/0.log" Jan 05 23:41:11 crc kubenswrapper[4910]: I0105 23:41:11.223034 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-vd7kz_1262f577-e64a-4760-818a-961ba274fc65/init/0.log" Jan 05 23:41:11 crc kubenswrapper[4910]: I0105 23:41:11.268259 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-vd7kz_1262f577-e64a-4760-818a-961ba274fc65/octavia-healthmanager/0.log" Jan 05 23:41:11 crc kubenswrapper[4910]: I0105 23:41:11.287385 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-tm48g_96c79f1a-fd70-4983-8ae6-e879e87c702d/init/0.log" Jan 05 23:41:11 crc kubenswrapper[4910]: I0105 23:41:11.578946 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-tm48g_96c79f1a-fd70-4983-8ae6-e879e87c702d/init/0.log" Jan 05 23:41:11 crc kubenswrapper[4910]: I0105 23:41:11.670247 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-qkp4b_b05718f5-8878-4646-beaa-0cea45fcfda9/init/0.log" Jan 05 23:41:11 crc kubenswrapper[4910]: I0105 23:41:11.747252 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-tm48g_96c79f1a-fd70-4983-8ae6-e879e87c702d/octavia-housekeeping/0.log" Jan 05 23:41:11 crc kubenswrapper[4910]: I0105 23:41:11.904598 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-qkp4b_b05718f5-8878-4646-beaa-0cea45fcfda9/init/0.log" Jan 05 23:41:11 crc kubenswrapper[4910]: I0105 23:41:11.911556 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-qkp4b_b05718f5-8878-4646-beaa-0cea45fcfda9/octavia-rsyslog/0.log" Jan 05 23:41:12 crc kubenswrapper[4910]: I0105 23:41:12.041660 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-r2r2n_8f3b2391-d3d6-4a8a-92ca-b10efda049f1/init/0.log" Jan 05 23:41:12 crc kubenswrapper[4910]: I0105 23:41:12.259567 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-r2r2n_8f3b2391-d3d6-4a8a-92ca-b10efda049f1/init/0.log" Jan 05 23:41:12 crc kubenswrapper[4910]: I0105 23:41:12.677316 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_898b9f52-035a-4f23-8362-10bbd49da54e/mysql-bootstrap/0.log" Jan 05 23:41:12 crc kubenswrapper[4910]: I0105 23:41:12.832373 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_octavia-worker-r2r2n_8f3b2391-d3d6-4a8a-92ca-b10efda049f1/octavia-worker/0.log" Jan 05 23:41:12 crc kubenswrapper[4910]: I0105 23:41:12.905429 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_898b9f52-035a-4f23-8362-10bbd49da54e/mysql-bootstrap/0.log" Jan 05 23:41:13 crc kubenswrapper[4910]: I0105 23:41:13.020158 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_898b9f52-035a-4f23-8362-10bbd49da54e/galera/0.log" Jan 05 23:41:13 crc kubenswrapper[4910]: I0105 23:41:13.398202 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_d18ec054-49b0-49da-bf27-16a8ac236b5d/mysql-bootstrap/0.log" Jan 05 23:41:13 crc kubenswrapper[4910]: I0105 23:41:13.602455 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_d18ec054-49b0-49da-bf27-16a8ac236b5d/mysql-bootstrap/0.log" Jan 05 23:41:13 crc kubenswrapper[4910]: I0105 23:41:13.650035 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_cb59e8ea-232f-490d-aa22-20b19bda2906/openstackclient/0.log" Jan 05 23:41:13 crc kubenswrapper[4910]: I0105 23:41:13.684212 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_d18ec054-49b0-49da-bf27-16a8ac236b5d/galera/0.log" Jan 05 23:41:13 crc kubenswrapper[4910]: I0105 23:41:13.971439 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-bbzzm_dbf59dc9-3276-47cc-8637-829289d0ba8c/openstack-network-exporter/0.log" Jan 05 23:41:14 crc kubenswrapper[4910]: I0105 23:41:14.002086 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-gn4cl_4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e/ovsdb-server-init/0.log" Jan 05 23:41:14 crc kubenswrapper[4910]: I0105 23:41:14.173341 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-gn4cl_4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e/ovsdb-server-init/0.log" Jan 05 23:41:14 crc kubenswrapper[4910]: I0105 23:41:14.189550 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-gn4cl_4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e/ovs-vswitchd/0.log" Jan 05 23:41:14 crc kubenswrapper[4910]: I0105 23:41:14.323054 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-gn4cl_4c5ec0bc-3c5c-4b65-afb1-1691bad96b5e/ovsdb-server/0.log" Jan 05 23:41:14 crc kubenswrapper[4910]: I0105 23:41:14.452425 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-s2ngj_63f387cd-1877-425f-8e52-5fe854426c89/ovn-controller/0.log" Jan 05 23:41:14 crc kubenswrapper[4910]: I0105 23:41:14.514506 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-copy-data_a9bfffd0-f255-43e2-8c45-bf4ce76358ff/adoption/0.log" Jan 05 23:41:14 crc kubenswrapper[4910]: I0105 23:41:14.698269 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_12828c14-528f-4cf9-823b-acb71c5a4332/ovn-northd/0.log" Jan 05 23:41:14 crc kubenswrapper[4910]: I0105 23:41:14.743421 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_12828c14-528f-4cf9-823b-acb71c5a4332/openstack-network-exporter/0.log" Jan 05 23:41:14 crc kubenswrapper[4910]: I0105 23:41:14.938890 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-nb-0_2c4bebcb-a217-4298-a65a-bc6bc3e22a12/openstack-network-exporter/0.log" Jan 05 23:41:14 crc kubenswrapper[4910]: I0105 23:41:14.963969 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_2c4bebcb-a217-4298-a65a-bc6bc3e22a12/ovsdbserver-nb/0.log" Jan 05 23:41:15 crc kubenswrapper[4910]: I0105 23:41:15.150772 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_591431b6-fd67-4c89-ade6-029bd9e33d62/openstack-network-exporter/0.log" Jan 05 23:41:15 crc kubenswrapper[4910]: I0105 23:41:15.204755 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_591431b6-fd67-4c89-ade6-029bd9e33d62/ovsdbserver-nb/0.log" Jan 05 23:41:15 crc kubenswrapper[4910]: I0105 23:41:15.359090 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_455e6927-176e-4136-aeb8-17cebb8f16a6/ovsdbserver-nb/0.log" Jan 05 23:41:15 crc kubenswrapper[4910]: I0105 23:41:15.359874 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_455e6927-176e-4136-aeb8-17cebb8f16a6/openstack-network-exporter/0.log" Jan 05 23:41:15 crc kubenswrapper[4910]: I0105 23:41:15.446082 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_5f7c71a9-62c5-45fa-ae02-416a77a410d3/openstack-network-exporter/0.log" Jan 05 23:41:15 crc kubenswrapper[4910]: I0105 23:41:15.631149 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_5f7c71a9-62c5-45fa-ae02-416a77a410d3/ovsdbserver-sb/0.log" Jan 05 23:41:15 crc kubenswrapper[4910]: I0105 23:41:15.719905 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_18d58720-d6ec-455f-81be-b70f02d66b95/openstack-network-exporter/0.log" Jan 05 23:41:15 crc kubenswrapper[4910]: I0105 23:41:15.769688 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_18d58720-d6ec-455f-81be-b70f02d66b95/ovsdbserver-sb/0.log" Jan 05 23:41:15 crc kubenswrapper[4910]: I0105 23:41:15.841645 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_7ab1341d-7501-4be3-aa43-e03cb032084e/memcached/0.log" Jan 05 23:41:15 crc kubenswrapper[4910]: I0105 23:41:15.962216 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_88b3afe6-1d81-45e3-bf42-2bda83b89872/openstack-network-exporter/0.log" Jan 05 23:41:16 crc kubenswrapper[4910]: I0105 23:41:16.006012 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_88b3afe6-1d81-45e3-bf42-2bda83b89872/ovsdbserver-sb/0.log" Jan 05 23:41:16 crc kubenswrapper[4910]: I0105 23:41:16.201495 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-77c85cc8c4-khhdx_564d3691-3354-45ec-b2b7-29413b00f611/placement-api/0.log" Jan 05 23:41:16 crc kubenswrapper[4910]: I0105 23:41:16.253234 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-77c85cc8c4-khhdx_564d3691-3354-45ec-b2b7-29413b00f611/placement-log/0.log" Jan 05 23:41:16 crc kubenswrapper[4910]: I0105 23:41:16.295681 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_pre-adoption-validation-openstack-pre-adoption-openstack-cr25g7_8a544f9e-9220-4cea-8c49-09d188124708/pre-adoption-validation-openstack-pre-adoption-openstack-cell1/0.log" Jan 05 23:41:16 crc kubenswrapper[4910]: I0105 23:41:16.472358 4910 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_pre-adoption-validation-openstack-pre-adoption-openstack-crwmkt_e10e76ce-5721-496e-93bc-b3566d1a3d8e/pre-adoption-validation-openstack-pre-adoption-openstack-cell1/0.log" Jan 05 23:41:16 crc kubenswrapper[4910]: I0105 23:41:16.526444 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_40ee7d85-f633-4577-817a-d8827050a814/init-config-reloader/0.log" Jan 05 23:41:16 crc kubenswrapper[4910]: I0105 23:41:16.787802 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_40ee7d85-f633-4577-817a-d8827050a814/thanos-sidecar/0.log" Jan 05 23:41:16 crc kubenswrapper[4910]: I0105 23:41:16.788058 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_40ee7d85-f633-4577-817a-d8827050a814/init-config-reloader/0.log" Jan 05 23:41:16 crc kubenswrapper[4910]: I0105 23:41:16.806656 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_40ee7d85-f633-4577-817a-d8827050a814/prometheus/0.log" Jan 05 23:41:16 crc kubenswrapper[4910]: I0105 23:41:16.825288 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_40ee7d85-f633-4577-817a-d8827050a814/config-reloader/0.log" Jan 05 23:41:16 crc kubenswrapper[4910]: I0105 23:41:16.968694 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2314902d-61c8-428f-b2f7-b47e7a9c9d2e/setup-container/0.log" Jan 05 23:41:17 crc kubenswrapper[4910]: I0105 23:41:17.430504 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2314902d-61c8-428f-b2f7-b47e7a9c9d2e/setup-container/0.log" Jan 05 23:41:17 crc kubenswrapper[4910]: I0105 23:41:17.467079 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_395698cd-ffef-4d1f-959f-39c54c8b76f8/setup-container/0.log" Jan 05 23:41:17 crc kubenswrapper[4910]: I0105 23:41:17.528059 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2314902d-61c8-428f-b2f7-b47e7a9c9d2e/rabbitmq/0.log" Jan 05 23:41:17 crc kubenswrapper[4910]: I0105 23:41:17.627683 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_395698cd-ffef-4d1f-959f-39c54c8b76f8/setup-container/0.log" Jan 05 23:41:17 crc kubenswrapper[4910]: I0105 23:41:17.792179 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_395698cd-ffef-4d1f-959f-39c54c8b76f8/rabbitmq/0.log" Jan 05 23:41:40 crc kubenswrapper[4910]: I0105 23:41:40.852050 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-f6f74d6db-t7xl6_11188457-aabb-45d0-85d5-3ae1fc7a085f/manager/0.log" Jan 05 23:41:40 crc kubenswrapper[4910]: I0105 23:41:40.877661 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw_d1097843-51dd-4524-958e-0c9322ec6600/util/0.log" Jan 05 23:41:41 crc kubenswrapper[4910]: I0105 23:41:41.047204 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw_d1097843-51dd-4524-958e-0c9322ec6600/util/0.log" Jan 05 23:41:41 crc kubenswrapper[4910]: I0105 23:41:41.047948 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw_d1097843-51dd-4524-958e-0c9322ec6600/pull/0.log" Jan 05 23:41:41 crc kubenswrapper[4910]: I0105 23:41:41.078504 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw_d1097843-51dd-4524-958e-0c9322ec6600/pull/0.log" Jan 05 23:41:41 crc kubenswrapper[4910]: I0105 23:41:41.251798 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw_d1097843-51dd-4524-958e-0c9322ec6600/pull/0.log" Jan 05 23:41:41 crc kubenswrapper[4910]: I0105 23:41:41.260571 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw_d1097843-51dd-4524-958e-0c9322ec6600/util/0.log" Jan 05 23:41:41 crc kubenswrapper[4910]: I0105 23:41:41.275381 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_c492f4a2acbe999979d81ac1ecd4689567cc191c6b559aea7f96522f5dxfcpw_d1097843-51dd-4524-958e-0c9322ec6600/extract/0.log" Jan 05 23:41:41 crc kubenswrapper[4910]: I0105 23:41:41.524365 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-66f8b87655-d6q5k_af80b86a-ae5b-4e42-b4c7-9fc033d4fd26/manager/0.log" Jan 05 23:41:41 crc kubenswrapper[4910]: I0105 23:41:41.529012 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-78979fc445-v4jmq_10a1f9a3-7d22-4e02-8b5b-4ae1374194cf/manager/0.log" Jan 05 23:41:41 crc kubenswrapper[4910]: I0105 23:41:41.785207 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-658dd65b86-hjhdr_69c2eea7-1ac0-42b2-b1b7-d4ffaba1a9b7/manager/0.log" Jan 05 23:41:41 crc kubenswrapper[4910]: I0105 23:41:41.846659 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7b549fc966-67s8n_c3362390-1824-422e-8a9b-dfcfc1098cfd/manager/0.log" Jan 05 23:41:41 crc kubenswrapper[4910]: I0105 23:41:41.989069 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7f5ddd8d7b-ht2cs_98338ebd-fd6a-49de-a042-edb94e115570/manager/0.log" Jan 05 23:41:42 crc kubenswrapper[4910]: I0105 23:41:42.203422 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-f99f54bc8-hfxg8_6bfbc4ba-ffa7-42df-9123-945bbe818352/manager/0.log" Jan 05 23:41:42 crc kubenswrapper[4910]: I0105 23:41:42.469062 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6d99759cf-89lvl_b6d9ba63-bd01-49ba-bb3d-af83bcd5f3ed/manager/0.log" Jan 05 23:41:42 crc kubenswrapper[4910]: I0105 23:41:42.489352 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-598945d5b8-n7nvl_ab5bdb68-d0c3-436b-a7d5-36fe8be5bd90/manager/0.log" Jan 05 23:41:42 crc kubenswrapper[4910]: I0105 23:41:42.502927 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-568985c78-wk9gv_84b7e891-c710-431d-81e3-3d0fef0bf08e/manager/0.log" Jan 05 23:41:42 crc kubenswrapper[4910]: I0105 23:41:42.684518 4910 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-7b88bfc995-fvgtr_c4b3f034-ce14-4081-a47c-feac32565388/manager/0.log" Jan 05 23:41:42 crc kubenswrapper[4910]: I0105 23:41:42.806107 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7cd87b778f-trrvg_dee3cccb-8251-41b8-82ec-a696001f803d/manager/0.log" Jan 05 23:41:43 crc kubenswrapper[4910]: I0105 23:41:43.079236 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-5fbbf8b6cc-2qtrb_aa22d9f9-f865-495a-a0f0-a8aa424051aa/manager/0.log" Jan 05 23:41:43 crc kubenswrapper[4910]: I0105 23:41:43.087845 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-68c649d9d-p4tsz_ac3384d0-7a86-4e80-94a7-e0ff9bd32143/manager/0.log" Jan 05 23:41:43 crc kubenswrapper[4910]: I0105 23:41:43.177933 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-596c464d775rs5r_4743ff0b-8d16-4ee3-beb9-091a85bc7182/manager/0.log" Jan 05 23:41:43 crc kubenswrapper[4910]: I0105 23:41:43.580938 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5845bc5b8-9pfbx_13c24329-71cc-45fb-93fe-94edbec20755/operator/0.log" Jan 05 23:41:43 crc kubenswrapper[4910]: I0105 23:41:43.734802 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-gfvrk_1ce59020-e2a4-4ba7-83cc-e080410e62d2/registry-server/0.log" Jan 05 23:41:43 crc kubenswrapper[4910]: I0105 23:41:43.903648 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-bf6d4f946-mrws8_e03b4a44-b8b3-46db-a760-cb3f43f83bea/manager/0.log" Jan 05 23:41:44 crc kubenswrapper[4910]: I0105 23:41:44.137751 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-9b6f8f78c-8j9cf_9224f0b2-2621-43c8-b061-66c826994814/manager/0.log" Jan 05 23:41:44 crc kubenswrapper[4910]: I0105 23:41:44.212531 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-cwjm5_31f503c0-3017-4c01-8594-7b6775a0f397/operator/0.log" Jan 05 23:41:44 crc kubenswrapper[4910]: I0105 23:41:44.378140 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-bb586bbf4-75hnw_5965fe2f-f268-418d-b039-682eb20f87ea/manager/0.log" Jan 05 23:41:44 crc kubenswrapper[4910]: I0105 23:41:44.577925 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-6c866cfdcb-j9dht_72bc4794-4890-40dc-9d78-6a02f2983ddf/manager/0.log" Jan 05 23:41:44 crc kubenswrapper[4910]: I0105 23:41:44.631164 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-68d988df55-4xm5m_299d2ab3-3f1b-4464-ab11-22aec9d915dd/manager/0.log" Jan 05 23:41:44 crc kubenswrapper[4910]: I0105 23:41:44.721177 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-9dbdf6486-mjkf5_9ba478cb-baeb-4955-b84b-872aacf97065/manager/0.log" Jan 05 23:41:45 crc kubenswrapper[4910]: I0105 23:41:45.246413 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-555f86cbf8-l5n82_2c5927c5-767c-49f2-91f1-c46608c506ff/manager/0.log" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.342324 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-clfsr"] Jan 05 23:41:47 crc kubenswrapper[4910]: E0105 23:41:47.343391 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c768ac3-99a9-414c-8b4a-cd3da396bae8" containerName="registry-server" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.343408 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c768ac3-99a9-414c-8b4a-cd3da396bae8" containerName="registry-server" Jan 05 23:41:47 crc kubenswrapper[4910]: E0105 23:41:47.343443 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c768ac3-99a9-414c-8b4a-cd3da396bae8" containerName="extract-utilities" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.343450 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c768ac3-99a9-414c-8b4a-cd3da396bae8" containerName="extract-utilities" Jan 05 23:41:47 crc kubenswrapper[4910]: E0105 23:41:47.343473 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c768ac3-99a9-414c-8b4a-cd3da396bae8" containerName="extract-content" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.343479 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c768ac3-99a9-414c-8b4a-cd3da396bae8" containerName="extract-content" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.343686 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c768ac3-99a9-414c-8b4a-cd3da396bae8" containerName="registry-server" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.349412 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.360276 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-clfsr"] Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.438176 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bd7040c-a714-4e39-a111-5676d2639e1e-catalog-content\") pod \"redhat-marketplace-clfsr\" (UID: \"4bd7040c-a714-4e39-a111-5676d2639e1e\") " pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.438314 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xf5g\" (UniqueName: \"kubernetes.io/projected/4bd7040c-a714-4e39-a111-5676d2639e1e-kube-api-access-9xf5g\") pod \"redhat-marketplace-clfsr\" (UID: \"4bd7040c-a714-4e39-a111-5676d2639e1e\") " pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.438390 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bd7040c-a714-4e39-a111-5676d2639e1e-utilities\") pod \"redhat-marketplace-clfsr\" (UID: \"4bd7040c-a714-4e39-a111-5676d2639e1e\") " pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.540818 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xf5g\" (UniqueName: \"kubernetes.io/projected/4bd7040c-a714-4e39-a111-5676d2639e1e-kube-api-access-9xf5g\") pod \"redhat-marketplace-clfsr\" (UID: \"4bd7040c-a714-4e39-a111-5676d2639e1e\") " pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.540918 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bd7040c-a714-4e39-a111-5676d2639e1e-utilities\") pod \"redhat-marketplace-clfsr\" (UID: \"4bd7040c-a714-4e39-a111-5676d2639e1e\") " pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.541039 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bd7040c-a714-4e39-a111-5676d2639e1e-catalog-content\") pod \"redhat-marketplace-clfsr\" (UID: \"4bd7040c-a714-4e39-a111-5676d2639e1e\") " pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.541406 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bd7040c-a714-4e39-a111-5676d2639e1e-utilities\") pod \"redhat-marketplace-clfsr\" (UID: \"4bd7040c-a714-4e39-a111-5676d2639e1e\") " pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.541508 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bd7040c-a714-4e39-a111-5676d2639e1e-catalog-content\") pod \"redhat-marketplace-clfsr\" (UID: \"4bd7040c-a714-4e39-a111-5676d2639e1e\") " pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.562911 4910 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-9xf5g\" (UniqueName: \"kubernetes.io/projected/4bd7040c-a714-4e39-a111-5676d2639e1e-kube-api-access-9xf5g\") pod \"redhat-marketplace-clfsr\" (UID: \"4bd7040c-a714-4e39-a111-5676d2639e1e\") " pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:47 crc kubenswrapper[4910]: I0105 23:41:47.674822 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:48 crc kubenswrapper[4910]: I0105 23:41:48.270714 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-clfsr"] Jan 05 23:41:48 crc kubenswrapper[4910]: I0105 23:41:48.925313 4910 generic.go:334] "Generic (PLEG): container finished" podID="4bd7040c-a714-4e39-a111-5676d2639e1e" containerID="ed08f626c18096e9734eb513365df652b784c251742344f629698e6437334c32" exitCode=0 Jan 05 23:41:48 crc kubenswrapper[4910]: I0105 23:41:48.925479 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clfsr" event={"ID":"4bd7040c-a714-4e39-a111-5676d2639e1e","Type":"ContainerDied","Data":"ed08f626c18096e9734eb513365df652b784c251742344f629698e6437334c32"} Jan 05 23:41:48 crc kubenswrapper[4910]: I0105 23:41:48.925639 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clfsr" event={"ID":"4bd7040c-a714-4e39-a111-5676d2639e1e","Type":"ContainerStarted","Data":"5f20c9426fbc83a179b68e3aea6b56180949761e7dee25bd841c910e6afba03d"} Jan 05 23:41:49 crc kubenswrapper[4910]: I0105 23:41:49.937017 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clfsr" event={"ID":"4bd7040c-a714-4e39-a111-5676d2639e1e","Type":"ContainerStarted","Data":"f288b35e6a23cdb3460ed29ae2ba3ba2aee37d4bf25c7930909954099d3333d8"} Jan 05 23:41:50 crc kubenswrapper[4910]: I0105 23:41:50.952948 4910 generic.go:334] "Generic (PLEG): container finished" podID="4bd7040c-a714-4e39-a111-5676d2639e1e" containerID="f288b35e6a23cdb3460ed29ae2ba3ba2aee37d4bf25c7930909954099d3333d8" exitCode=0 Jan 05 23:41:50 crc kubenswrapper[4910]: I0105 23:41:50.953049 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clfsr" event={"ID":"4bd7040c-a714-4e39-a111-5676d2639e1e","Type":"ContainerDied","Data":"f288b35e6a23cdb3460ed29ae2ba3ba2aee37d4bf25c7930909954099d3333d8"} Jan 05 23:41:51 crc kubenswrapper[4910]: I0105 23:41:51.963170 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clfsr" event={"ID":"4bd7040c-a714-4e39-a111-5676d2639e1e","Type":"ContainerStarted","Data":"c2290c02fd9c3da1a1d90c33e330a7ac24a1db8677f62219798e48f1717c062d"} Jan 05 23:41:52 crc kubenswrapper[4910]: I0105 23:41:52.021747 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-clfsr" podStartSLOduration=2.510175017 podStartE2EDuration="5.021724967s" podCreationTimestamp="2026-01-05 23:41:47 +0000 UTC" firstStartedPulling="2026-01-05 23:41:48.927695211 +0000 UTC m=+6640.505192881" lastFinishedPulling="2026-01-05 23:41:51.439245161 +0000 UTC m=+6643.016742831" observedRunningTime="2026-01-05 23:41:52.006431672 +0000 UTC m=+6643.583929342" watchObservedRunningTime="2026-01-05 23:41:52.021724967 +0000 UTC m=+6643.599222637" Jan 05 23:41:57 crc kubenswrapper[4910]: I0105 23:41:57.676193 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
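
The pod_startup_latency_tracker entry above encodes a small calculation: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that same interval with the image-pull window (lastFinishedPulling minus firstStartedPulling) subtracted. Reproducing the arithmetic with the timestamps from the entry (converted to RFC 3339 form for parsing):

    package main

    import (
        "fmt"
        "time"
    )

    func mustParse(s string) time.Time {
        t, err := time.Parse(time.RFC3339Nano, s)
        if err != nil {
            panic(err)
        }
        return t
    }

    func main() {
        created := mustParse("2026-01-05T23:41:47Z")
        firstPull := mustParse("2026-01-05T23:41:48.927695211Z")
        lastPull := mustParse("2026-01-05T23:41:51.439245161Z")
        observed := mustParse("2026-01-05T23:41:52.021724967Z")

        e2e := observed.Sub(created)         // full creation-to-running time
        slo := e2e - lastPull.Sub(firstPull) // same, minus the image pull
        fmt.Println(e2e, slo)                // 5.021724967s 2.510175017s
    }

The two printed values match podStartE2EDuration and podStartSLOduration in the log line.
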
pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:57 crc kubenswrapper[4910]: I0105 23:41:57.676862 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:57 crc kubenswrapper[4910]: I0105 23:41:57.732323 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:58 crc kubenswrapper[4910]: I0105 23:41:58.091837 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:41:58 crc kubenswrapper[4910]: I0105 23:41:58.148260 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-clfsr"] Jan 05 23:42:00 crc kubenswrapper[4910]: I0105 23:42:00.049996 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-clfsr" podUID="4bd7040c-a714-4e39-a111-5676d2639e1e" containerName="registry-server" containerID="cri-o://c2290c02fd9c3da1a1d90c33e330a7ac24a1db8677f62219798e48f1717c062d" gracePeriod=2 Jan 05 23:42:00 crc kubenswrapper[4910]: I0105 23:42:00.627984 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:42:00 crc kubenswrapper[4910]: I0105 23:42:00.765113 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xf5g\" (UniqueName: \"kubernetes.io/projected/4bd7040c-a714-4e39-a111-5676d2639e1e-kube-api-access-9xf5g\") pod \"4bd7040c-a714-4e39-a111-5676d2639e1e\" (UID: \"4bd7040c-a714-4e39-a111-5676d2639e1e\") " Jan 05 23:42:00 crc kubenswrapper[4910]: I0105 23:42:00.765778 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bd7040c-a714-4e39-a111-5676d2639e1e-utilities\") pod \"4bd7040c-a714-4e39-a111-5676d2639e1e\" (UID: \"4bd7040c-a714-4e39-a111-5676d2639e1e\") " Jan 05 23:42:00 crc kubenswrapper[4910]: I0105 23:42:00.765870 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bd7040c-a714-4e39-a111-5676d2639e1e-catalog-content\") pod \"4bd7040c-a714-4e39-a111-5676d2639e1e\" (UID: \"4bd7040c-a714-4e39-a111-5676d2639e1e\") " Jan 05 23:42:00 crc kubenswrapper[4910]: I0105 23:42:00.768903 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4bd7040c-a714-4e39-a111-5676d2639e1e-utilities" (OuterVolumeSpecName: "utilities") pod "4bd7040c-a714-4e39-a111-5676d2639e1e" (UID: "4bd7040c-a714-4e39-a111-5676d2639e1e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:42:00 crc kubenswrapper[4910]: I0105 23:42:00.773049 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bd7040c-a714-4e39-a111-5676d2639e1e-kube-api-access-9xf5g" (OuterVolumeSpecName: "kube-api-access-9xf5g") pod "4bd7040c-a714-4e39-a111-5676d2639e1e" (UID: "4bd7040c-a714-4e39-a111-5676d2639e1e"). InnerVolumeSpecName "kube-api-access-9xf5g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:42:00 crc kubenswrapper[4910]: I0105 23:42:00.792007 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4bd7040c-a714-4e39-a111-5676d2639e1e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4bd7040c-a714-4e39-a111-5676d2639e1e" (UID: "4bd7040c-a714-4e39-a111-5676d2639e1e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:42:00 crc kubenswrapper[4910]: I0105 23:42:00.868583 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bd7040c-a714-4e39-a111-5676d2639e1e-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:42:00 crc kubenswrapper[4910]: I0105 23:42:00.869162 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bd7040c-a714-4e39-a111-5676d2639e1e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:42:00 crc kubenswrapper[4910]: I0105 23:42:00.869232 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xf5g\" (UniqueName: \"kubernetes.io/projected/4bd7040c-a714-4e39-a111-5676d2639e1e-kube-api-access-9xf5g\") on node \"crc\" DevicePath \"\"" Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.077971 4910 generic.go:334] "Generic (PLEG): container finished" podID="4bd7040c-a714-4e39-a111-5676d2639e1e" containerID="c2290c02fd9c3da1a1d90c33e330a7ac24a1db8677f62219798e48f1717c062d" exitCode=0 Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.078020 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clfsr" event={"ID":"4bd7040c-a714-4e39-a111-5676d2639e1e","Type":"ContainerDied","Data":"c2290c02fd9c3da1a1d90c33e330a7ac24a1db8677f62219798e48f1717c062d"} Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.078051 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clfsr" event={"ID":"4bd7040c-a714-4e39-a111-5676d2639e1e","Type":"ContainerDied","Data":"5f20c9426fbc83a179b68e3aea6b56180949761e7dee25bd841c910e6afba03d"} Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.078080 4910 scope.go:117] "RemoveContainer" containerID="c2290c02fd9c3da1a1d90c33e330a7ac24a1db8677f62219798e48f1717c062d" Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.078410 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clfsr" Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.104503 4910 scope.go:117] "RemoveContainer" containerID="f288b35e6a23cdb3460ed29ae2ba3ba2aee37d4bf25c7930909954099d3333d8" Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.125541 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-clfsr"] Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.136346 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-clfsr"] Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.142514 4910 scope.go:117] "RemoveContainer" containerID="ed08f626c18096e9734eb513365df652b784c251742344f629698e6437334c32" Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.192440 4910 scope.go:117] "RemoveContainer" containerID="c2290c02fd9c3da1a1d90c33e330a7ac24a1db8677f62219798e48f1717c062d" Jan 05 23:42:01 crc kubenswrapper[4910]: E0105 23:42:01.192900 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2290c02fd9c3da1a1d90c33e330a7ac24a1db8677f62219798e48f1717c062d\": container with ID starting with c2290c02fd9c3da1a1d90c33e330a7ac24a1db8677f62219798e48f1717c062d not found: ID does not exist" containerID="c2290c02fd9c3da1a1d90c33e330a7ac24a1db8677f62219798e48f1717c062d" Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.192954 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2290c02fd9c3da1a1d90c33e330a7ac24a1db8677f62219798e48f1717c062d"} err="failed to get container status \"c2290c02fd9c3da1a1d90c33e330a7ac24a1db8677f62219798e48f1717c062d\": rpc error: code = NotFound desc = could not find container \"c2290c02fd9c3da1a1d90c33e330a7ac24a1db8677f62219798e48f1717c062d\": container with ID starting with c2290c02fd9c3da1a1d90c33e330a7ac24a1db8677f62219798e48f1717c062d not found: ID does not exist" Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.192991 4910 scope.go:117] "RemoveContainer" containerID="f288b35e6a23cdb3460ed29ae2ba3ba2aee37d4bf25c7930909954099d3333d8" Jan 05 23:42:01 crc kubenswrapper[4910]: E0105 23:42:01.193446 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f288b35e6a23cdb3460ed29ae2ba3ba2aee37d4bf25c7930909954099d3333d8\": container with ID starting with f288b35e6a23cdb3460ed29ae2ba3ba2aee37d4bf25c7930909954099d3333d8 not found: ID does not exist" containerID="f288b35e6a23cdb3460ed29ae2ba3ba2aee37d4bf25c7930909954099d3333d8" Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.193487 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f288b35e6a23cdb3460ed29ae2ba3ba2aee37d4bf25c7930909954099d3333d8"} err="failed to get container status \"f288b35e6a23cdb3460ed29ae2ba3ba2aee37d4bf25c7930909954099d3333d8\": rpc error: code = NotFound desc = could not find container \"f288b35e6a23cdb3460ed29ae2ba3ba2aee37d4bf25c7930909954099d3333d8\": container with ID starting with f288b35e6a23cdb3460ed29ae2ba3ba2aee37d4bf25c7930909954099d3333d8 not found: ID does not exist" Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.193506 4910 scope.go:117] "RemoveContainer" containerID="ed08f626c18096e9734eb513365df652b784c251742344f629698e6437334c32" Jan 05 23:42:01 crc kubenswrapper[4910]: E0105 23:42:01.193720 4910 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ed08f626c18096e9734eb513365df652b784c251742344f629698e6437334c32\": container with ID starting with ed08f626c18096e9734eb513365df652b784c251742344f629698e6437334c32 not found: ID does not exist" containerID="ed08f626c18096e9734eb513365df652b784c251742344f629698e6437334c32" Jan 05 23:42:01 crc kubenswrapper[4910]: I0105 23:42:01.193747 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed08f626c18096e9734eb513365df652b784c251742344f629698e6437334c32"} err="failed to get container status \"ed08f626c18096e9734eb513365df652b784c251742344f629698e6437334c32\": rpc error: code = NotFound desc = could not find container \"ed08f626c18096e9734eb513365df652b784c251742344f629698e6437334c32\": container with ID starting with ed08f626c18096e9734eb513365df652b784c251742344f629698e6437334c32 not found: ID does not exist" Jan 05 23:42:02 crc kubenswrapper[4910]: I0105 23:42:02.736200 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bd7040c-a714-4e39-a111-5676d2639e1e" path="/var/lib/kubelet/pods/4bd7040c-a714-4e39-a111-5676d2639e1e/volumes" Jan 05 23:42:07 crc kubenswrapper[4910]: I0105 23:42:07.884865 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-9s4dd_5d128f8c-6ea3-4ba0-96bc-8fcd5aac98bf/control-plane-machine-set-operator/0.log" Jan 05 23:42:08 crc kubenswrapper[4910]: I0105 23:42:08.118745 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-6d5lf_3526640e-85a9-41f1-b79d-c31854227b25/kube-rbac-proxy/0.log" Jan 05 23:42:08 crc kubenswrapper[4910]: I0105 23:42:08.179900 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-6d5lf_3526640e-85a9-41f1-b79d-c31854227b25/machine-api-operator/0.log" Jan 05 23:42:10 crc kubenswrapper[4910]: I0105 23:42:10.952690 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:42:10 crc kubenswrapper[4910]: I0105 23:42:10.953351 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:42:22 crc kubenswrapper[4910]: I0105 23:42:22.339608 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-lwkkl_6991aa21-d1ec-4b50-8675-4876f90b6c9f/cert-manager-controller/0.log" Jan 05 23:42:22 crc kubenswrapper[4910]: I0105 23:42:22.509270 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-dzs8b_d78320ee-d759-4e45-9cd8-c0c8f1570ef7/cert-manager-cainjector/0.log" Jan 05 23:42:22 crc kubenswrapper[4910]: I0105 23:42:22.559522 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-5rk2d_2af2cd66-2e31-4c27-b216-136a81c22df8/cert-manager-webhook/0.log" Jan 05 23:42:37 crc kubenswrapper[4910]: I0105 23:42:37.457087 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6ff7998486-m62fj_8f07ce10-56ab-4b95-a099-1ab94c960aad/nmstate-console-plugin/0.log" Jan 05 23:42:37 crc kubenswrapper[4910]: I0105 23:42:37.668811 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-ww7rm_d75fd82b-afb0-400e-9db1-57e5d187dfbc/nmstate-handler/0.log" Jan 05 23:42:37 crc kubenswrapper[4910]: I0105 23:42:37.725584 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-4vzdw_d6b13462-1840-44b9-b85a-44a40509a366/kube-rbac-proxy/0.log" Jan 05 23:42:37 crc kubenswrapper[4910]: I0105 23:42:37.818763 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-4vzdw_d6b13462-1840-44b9-b85a-44a40509a366/nmstate-metrics/0.log" Jan 05 23:42:37 crc kubenswrapper[4910]: I0105 23:42:37.901795 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-6769fb99d-47b86_4d1fecfb-765b-420d-b427-1d3b9f5f14f7/nmstate-operator/0.log" Jan 05 23:42:38 crc kubenswrapper[4910]: I0105 23:42:38.016049 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-f8fb84555-gc9w5_3fc78fdb-7ef9-4185-b690-8a249946e4b9/nmstate-webhook/0.log" Jan 05 23:42:40 crc kubenswrapper[4910]: I0105 23:42:40.953137 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:42:40 crc kubenswrapper[4910]: I0105 23:42:40.953634 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.621054 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lnr46"] Jan 05 23:42:45 crc kubenswrapper[4910]: E0105 23:42:45.622225 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bd7040c-a714-4e39-a111-5676d2639e1e" containerName="extract-utilities" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.622242 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bd7040c-a714-4e39-a111-5676d2639e1e" containerName="extract-utilities" Jan 05 23:42:45 crc kubenswrapper[4910]: E0105 23:42:45.622260 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bd7040c-a714-4e39-a111-5676d2639e1e" containerName="extract-content" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.622269 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bd7040c-a714-4e39-a111-5676d2639e1e" containerName="extract-content" Jan 05 23:42:45 crc kubenswrapper[4910]: E0105 23:42:45.622289 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bd7040c-a714-4e39-a111-5676d2639e1e" containerName="registry-server" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.622296 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bd7040c-a714-4e39-a111-5676d2639e1e" containerName="registry-server" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.622560 4910 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="4bd7040c-a714-4e39-a111-5676d2639e1e" containerName="registry-server" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.624347 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.645177 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lnr46"] Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.721984 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-utilities\") pod \"redhat-operators-lnr46\" (UID: \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\") " pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.722027 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fwtx\" (UniqueName: \"kubernetes.io/projected/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-kube-api-access-7fwtx\") pod \"redhat-operators-lnr46\" (UID: \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\") " pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.722084 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-catalog-content\") pod \"redhat-operators-lnr46\" (UID: \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\") " pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.824576 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-utilities\") pod \"redhat-operators-lnr46\" (UID: \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\") " pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.824981 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-utilities\") pod \"redhat-operators-lnr46\" (UID: \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\") " pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.825036 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fwtx\" (UniqueName: \"kubernetes.io/projected/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-kube-api-access-7fwtx\") pod \"redhat-operators-lnr46\" (UID: \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\") " pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.825084 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-catalog-content\") pod \"redhat-operators-lnr46\" (UID: \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\") " pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.825350 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-catalog-content\") pod \"redhat-operators-lnr46\" (UID: \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\") " 
pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.855748 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fwtx\" (UniqueName: \"kubernetes.io/projected/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-kube-api-access-7fwtx\") pod \"redhat-operators-lnr46\" (UID: \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\") " pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:42:45 crc kubenswrapper[4910]: I0105 23:42:45.942632 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:42:46 crc kubenswrapper[4910]: I0105 23:42:46.057215 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-2888-account-create-update-5gtwz"] Jan 05 23:42:46 crc kubenswrapper[4910]: I0105 23:42:46.063544 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-tvm6d"] Jan 05 23:42:46 crc kubenswrapper[4910]: I0105 23:42:46.074295 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-2888-account-create-update-5gtwz"] Jan 05 23:42:46 crc kubenswrapper[4910]: I0105 23:42:46.084860 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-tvm6d"] Jan 05 23:42:46 crc kubenswrapper[4910]: I0105 23:42:46.570716 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lnr46"] Jan 05 23:42:46 crc kubenswrapper[4910]: I0105 23:42:46.734407 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11faec61-a084-4bfb-b9b4-06fe57b34754" path="/var/lib/kubelet/pods/11faec61-a084-4bfb-b9b4-06fe57b34754/volumes" Jan 05 23:42:46 crc kubenswrapper[4910]: I0105 23:42:46.735588 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac8e2000-282c-4602-b740-b834d9d58e0f" path="/var/lib/kubelet/pods/ac8e2000-282c-4602-b740-b834d9d58e0f/volumes" Jan 05 23:42:47 crc kubenswrapper[4910]: I0105 23:42:47.600766 4910 generic.go:334] "Generic (PLEG): container finished" podID="8f8155b1-31fe-486a-856f-d87ad0e0e2a1" containerID="2d6495a3c4bd3929b7c05c8376682b54cd0a6ebc8b921e6b55dfb95c8902ea47" exitCode=0 Jan 05 23:42:47 crc kubenswrapper[4910]: I0105 23:42:47.600817 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lnr46" event={"ID":"8f8155b1-31fe-486a-856f-d87ad0e0e2a1","Type":"ContainerDied","Data":"2d6495a3c4bd3929b7c05c8376682b54cd0a6ebc8b921e6b55dfb95c8902ea47"} Jan 05 23:42:47 crc kubenswrapper[4910]: I0105 23:42:47.600852 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lnr46" event={"ID":"8f8155b1-31fe-486a-856f-d87ad0e0e2a1","Type":"ContainerStarted","Data":"2fbf8a8d8e59fa9094cf8dd2c798b81b19c0f08f689328dbcb6366ff0ce96597"} Jan 05 23:42:47 crc kubenswrapper[4910]: I0105 23:42:47.603176 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 23:42:48 crc kubenswrapper[4910]: I0105 23:42:48.610078 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lnr46" event={"ID":"8f8155b1-31fe-486a-856f-d87ad0e0e2a1","Type":"ContainerStarted","Data":"7ef6925be9343b75dcb8a020d1b29ab83f09ed066be823ae04366e61d139d596"} Jan 05 23:42:50 crc kubenswrapper[4910]: I0105 23:42:50.630634 4910 generic.go:334] "Generic (PLEG): container finished" podID="8f8155b1-31fe-486a-856f-d87ad0e0e2a1" 
containerID="7ef6925be9343b75dcb8a020d1b29ab83f09ed066be823ae04366e61d139d596" exitCode=0 Jan 05 23:42:50 crc kubenswrapper[4910]: I0105 23:42:50.630707 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lnr46" event={"ID":"8f8155b1-31fe-486a-856f-d87ad0e0e2a1","Type":"ContainerDied","Data":"7ef6925be9343b75dcb8a020d1b29ab83f09ed066be823ae04366e61d139d596"} Jan 05 23:42:51 crc kubenswrapper[4910]: I0105 23:42:51.643690 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lnr46" event={"ID":"8f8155b1-31fe-486a-856f-d87ad0e0e2a1","Type":"ContainerStarted","Data":"101c841176005c0af76639eee4cf91a08d671ae41637ec74d9e3b2429aac1f82"} Jan 05 23:42:51 crc kubenswrapper[4910]: I0105 23:42:51.666866 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lnr46" podStartSLOduration=3.120477865 podStartE2EDuration="6.666844061s" podCreationTimestamp="2026-01-05 23:42:45 +0000 UTC" firstStartedPulling="2026-01-05 23:42:47.602940688 +0000 UTC m=+6699.180438358" lastFinishedPulling="2026-01-05 23:42:51.149306874 +0000 UTC m=+6702.726804554" observedRunningTime="2026-01-05 23:42:51.666008241 +0000 UTC m=+6703.243505921" watchObservedRunningTime="2026-01-05 23:42:51.666844061 +0000 UTC m=+6703.244341731" Jan 05 23:42:52 crc kubenswrapper[4910]: I0105 23:42:52.094344 4910 scope.go:117] "RemoveContainer" containerID="321b41a21f66f4581c85633a8ffcc12bc2af4ece73e8d4ba62c4dbb4ff7d49e0" Jan 05 23:42:52 crc kubenswrapper[4910]: I0105 23:42:52.138858 4910 scope.go:117] "RemoveContainer" containerID="37be7f31573ec3ed5152eeef8f5e33a9c5ad62f9746fc5cf9ea8ea3f39207f7f" Jan 05 23:42:55 crc kubenswrapper[4910]: I0105 23:42:55.627638 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-ztwkd_62d69338-9eb3-4401-95af-3dcaf1ce48d3/kube-rbac-proxy/0.log" Jan 05 23:42:55 crc kubenswrapper[4910]: I0105 23:42:55.921085 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/cp-frr-files/0.log" Jan 05 23:42:55 crc kubenswrapper[4910]: I0105 23:42:55.942747 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:42:55 crc kubenswrapper[4910]: I0105 23:42:55.947507 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:42:56 crc kubenswrapper[4910]: I0105 23:42:56.004284 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-ztwkd_62d69338-9eb3-4401-95af-3dcaf1ce48d3/controller/0.log" Jan 05 23:42:56 crc kubenswrapper[4910]: I0105 23:42:56.268163 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/cp-reloader/0.log" Jan 05 23:42:56 crc kubenswrapper[4910]: I0105 23:42:56.273806 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/cp-frr-files/0.log" Jan 05 23:42:56 crc kubenswrapper[4910]: I0105 23:42:56.318694 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/cp-reloader/0.log" Jan 05 23:42:56 crc kubenswrapper[4910]: I0105 23:42:56.335404 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/cp-metrics/0.log" Jan 05 23:42:56 crc kubenswrapper[4910]: I0105 23:42:56.565094 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/cp-frr-files/0.log" Jan 05 23:42:56 crc kubenswrapper[4910]: I0105 23:42:56.574726 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/cp-reloader/0.log" Jan 05 23:42:56 crc kubenswrapper[4910]: I0105 23:42:56.591339 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/cp-metrics/0.log" Jan 05 23:42:56 crc kubenswrapper[4910]: I0105 23:42:56.684933 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/cp-metrics/0.log" Jan 05 23:42:56 crc kubenswrapper[4910]: I0105 23:42:56.803959 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/cp-frr-files/0.log" Jan 05 23:42:56 crc kubenswrapper[4910]: I0105 23:42:56.859709 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/cp-metrics/0.log" Jan 05 23:42:56 crc kubenswrapper[4910]: I0105 23:42:56.891632 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/cp-reloader/0.log" Jan 05 23:42:57 crc kubenswrapper[4910]: I0105 23:42:57.024269 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lnr46" podUID="8f8155b1-31fe-486a-856f-d87ad0e0e2a1" containerName="registry-server" probeResult="failure" output=< Jan 05 23:42:57 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Jan 05 23:42:57 crc kubenswrapper[4910]: > Jan 05 23:42:57 crc kubenswrapper[4910]: I0105 23:42:57.086952 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/controller/0.log" Jan 05 23:42:57 crc kubenswrapper[4910]: I0105 23:42:57.131595 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/frr-metrics/0.log" Jan 05 23:42:57 crc kubenswrapper[4910]: I0105 23:42:57.184291 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/kube-rbac-proxy/0.log" Jan 05 23:42:57 crc kubenswrapper[4910]: I0105 23:42:57.275557 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/kube-rbac-proxy-frr/0.log" Jan 05 23:42:57 crc kubenswrapper[4910]: I0105 23:42:57.373291 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/reloader/0.log" Jan 05 23:42:57 crc kubenswrapper[4910]: I0105 23:42:57.544599 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7784b6fcf-n8npn_73796739-d310-4f62-96b9-1634f13d77ae/frr-k8s-webhook-server/0.log" Jan 05 23:42:57 crc kubenswrapper[4910]: I0105 23:42:57.695671 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-controller-manager-74d48df479-fzsxf_1dbbe994-4065-4184-868f-98d333741069/manager/0.log" Jan 05 23:42:58 crc kubenswrapper[4910]: I0105 23:42:58.022198 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-b5f859b96-ztvsm_6c442f78-8c84-44eb-851c-836a2473aea7/webhook-server/0.log" Jan 05 23:42:58 crc kubenswrapper[4910]: I0105 23:42:58.166720 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-msrzn_a8265d43-4c9a-499d-9fb7-84292f113454/kube-rbac-proxy/0.log" Jan 05 23:42:59 crc kubenswrapper[4910]: I0105 23:42:59.033889 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-bxg5l"] Jan 05 23:42:59 crc kubenswrapper[4910]: I0105 23:42:59.042312 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-bxg5l"] Jan 05 23:42:59 crc kubenswrapper[4910]: I0105 23:42:59.096191 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-msrzn_a8265d43-4c9a-499d-9fb7-84292f113454/speaker/0.log" Jan 05 23:42:59 crc kubenswrapper[4910]: I0105 23:42:59.872007 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dhdgh_f5a1ce03-3e27-472c-9e32-20c967308ac8/frr/0.log" Jan 05 23:43:00 crc kubenswrapper[4910]: I0105 23:43:00.736270 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72257aab-c18e-432e-a662-73955418e381" path="/var/lib/kubelet/pods/72257aab-c18e-432e-a662-73955418e381/volumes" Jan 05 23:43:05 crc kubenswrapper[4910]: I0105 23:43:05.995228 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:43:06 crc kubenswrapper[4910]: I0105 23:43:06.059065 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:43:06 crc kubenswrapper[4910]: I0105 23:43:06.236295 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lnr46"] Jan 05 23:43:07 crc kubenswrapper[4910]: I0105 23:43:07.874695 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lnr46" podUID="8f8155b1-31fe-486a-856f-d87ad0e0e2a1" containerName="registry-server" containerID="cri-o://101c841176005c0af76639eee4cf91a08d671ae41637ec74d9e3b2429aac1f82" gracePeriod=2 Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.513176 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.664921 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-utilities\") pod \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\" (UID: \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\") " Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.665294 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fwtx\" (UniqueName: \"kubernetes.io/projected/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-kube-api-access-7fwtx\") pod \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\" (UID: \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\") " Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.665468 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-catalog-content\") pod \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\" (UID: \"8f8155b1-31fe-486a-856f-d87ad0e0e2a1\") " Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.674030 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-utilities" (OuterVolumeSpecName: "utilities") pod "8f8155b1-31fe-486a-856f-d87ad0e0e2a1" (UID: "8f8155b1-31fe-486a-856f-d87ad0e0e2a1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.682397 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-kube-api-access-7fwtx" (OuterVolumeSpecName: "kube-api-access-7fwtx") pod "8f8155b1-31fe-486a-856f-d87ad0e0e2a1" (UID: "8f8155b1-31fe-486a-856f-d87ad0e0e2a1"). InnerVolumeSpecName "kube-api-access-7fwtx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.767990 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fwtx\" (UniqueName: \"kubernetes.io/projected/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-kube-api-access-7fwtx\") on node \"crc\" DevicePath \"\"" Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.768278 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-utilities\") on node \"crc\" DevicePath \"\"" Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.785237 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8f8155b1-31fe-486a-856f-d87ad0e0e2a1" (UID: "8f8155b1-31fe-486a-856f-d87ad0e0e2a1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.873291 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f8155b1-31fe-486a-856f-d87ad0e0e2a1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.885516 4910 generic.go:334] "Generic (PLEG): container finished" podID="8f8155b1-31fe-486a-856f-d87ad0e0e2a1" containerID="101c841176005c0af76639eee4cf91a08d671ae41637ec74d9e3b2429aac1f82" exitCode=0 Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.885552 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lnr46" event={"ID":"8f8155b1-31fe-486a-856f-d87ad0e0e2a1","Type":"ContainerDied","Data":"101c841176005c0af76639eee4cf91a08d671ae41637ec74d9e3b2429aac1f82"} Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.885581 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lnr46" event={"ID":"8f8155b1-31fe-486a-856f-d87ad0e0e2a1","Type":"ContainerDied","Data":"2fbf8a8d8e59fa9094cf8dd2c798b81b19c0f08f689328dbcb6366ff0ce96597"} Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.885599 4910 scope.go:117] "RemoveContainer" containerID="101c841176005c0af76639eee4cf91a08d671ae41637ec74d9e3b2429aac1f82" Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.885720 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lnr46" Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.930531 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lnr46"] Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.939516 4910 scope.go:117] "RemoveContainer" containerID="7ef6925be9343b75dcb8a020d1b29ab83f09ed066be823ae04366e61d139d596" Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.944014 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lnr46"] Jan 05 23:43:08 crc kubenswrapper[4910]: I0105 23:43:08.967074 4910 scope.go:117] "RemoveContainer" containerID="2d6495a3c4bd3929b7c05c8376682b54cd0a6ebc8b921e6b55dfb95c8902ea47" Jan 05 23:43:09 crc kubenswrapper[4910]: I0105 23:43:09.018624 4910 scope.go:117] "RemoveContainer" containerID="101c841176005c0af76639eee4cf91a08d671ae41637ec74d9e3b2429aac1f82" Jan 05 23:43:09 crc kubenswrapper[4910]: E0105 23:43:09.019190 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"101c841176005c0af76639eee4cf91a08d671ae41637ec74d9e3b2429aac1f82\": container with ID starting with 101c841176005c0af76639eee4cf91a08d671ae41637ec74d9e3b2429aac1f82 not found: ID does not exist" containerID="101c841176005c0af76639eee4cf91a08d671ae41637ec74d9e3b2429aac1f82" Jan 05 23:43:09 crc kubenswrapper[4910]: I0105 23:43:09.019285 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"101c841176005c0af76639eee4cf91a08d671ae41637ec74d9e3b2429aac1f82"} err="failed to get container status \"101c841176005c0af76639eee4cf91a08d671ae41637ec74d9e3b2429aac1f82\": rpc error: code = NotFound desc = could not find container \"101c841176005c0af76639eee4cf91a08d671ae41637ec74d9e3b2429aac1f82\": container with ID starting with 101c841176005c0af76639eee4cf91a08d671ae41637ec74d9e3b2429aac1f82 not found: ID does not exist" Jan 05 23:43:09 crc 
kubenswrapper[4910]: I0105 23:43:09.019361 4910 scope.go:117] "RemoveContainer" containerID="7ef6925be9343b75dcb8a020d1b29ab83f09ed066be823ae04366e61d139d596" Jan 05 23:43:09 crc kubenswrapper[4910]: E0105 23:43:09.019725 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ef6925be9343b75dcb8a020d1b29ab83f09ed066be823ae04366e61d139d596\": container with ID starting with 7ef6925be9343b75dcb8a020d1b29ab83f09ed066be823ae04366e61d139d596 not found: ID does not exist" containerID="7ef6925be9343b75dcb8a020d1b29ab83f09ed066be823ae04366e61d139d596" Jan 05 23:43:09 crc kubenswrapper[4910]: I0105 23:43:09.019767 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ef6925be9343b75dcb8a020d1b29ab83f09ed066be823ae04366e61d139d596"} err="failed to get container status \"7ef6925be9343b75dcb8a020d1b29ab83f09ed066be823ae04366e61d139d596\": rpc error: code = NotFound desc = could not find container \"7ef6925be9343b75dcb8a020d1b29ab83f09ed066be823ae04366e61d139d596\": container with ID starting with 7ef6925be9343b75dcb8a020d1b29ab83f09ed066be823ae04366e61d139d596 not found: ID does not exist" Jan 05 23:43:09 crc kubenswrapper[4910]: I0105 23:43:09.019800 4910 scope.go:117] "RemoveContainer" containerID="2d6495a3c4bd3929b7c05c8376682b54cd0a6ebc8b921e6b55dfb95c8902ea47" Jan 05 23:43:09 crc kubenswrapper[4910]: E0105 23:43:09.020245 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d6495a3c4bd3929b7c05c8376682b54cd0a6ebc8b921e6b55dfb95c8902ea47\": container with ID starting with 2d6495a3c4bd3929b7c05c8376682b54cd0a6ebc8b921e6b55dfb95c8902ea47 not found: ID does not exist" containerID="2d6495a3c4bd3929b7c05c8376682b54cd0a6ebc8b921e6b55dfb95c8902ea47" Jan 05 23:43:09 crc kubenswrapper[4910]: I0105 23:43:09.020695 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d6495a3c4bd3929b7c05c8376682b54cd0a6ebc8b921e6b55dfb95c8902ea47"} err="failed to get container status \"2d6495a3c4bd3929b7c05c8376682b54cd0a6ebc8b921e6b55dfb95c8902ea47\": rpc error: code = NotFound desc = could not find container \"2d6495a3c4bd3929b7c05c8376682b54cd0a6ebc8b921e6b55dfb95c8902ea47\": container with ID starting with 2d6495a3c4bd3929b7c05c8376682b54cd0a6ebc8b921e6b55dfb95c8902ea47 not found: ID does not exist" Jan 05 23:43:10 crc kubenswrapper[4910]: I0105 23:43:10.732971 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f8155b1-31fe-486a-856f-d87ad0e0e2a1" path="/var/lib/kubelet/pods/8f8155b1-31fe-486a-856f-d87ad0e0e2a1/volumes" Jan 05 23:43:10 crc kubenswrapper[4910]: I0105 23:43:10.952167 4910 patch_prober.go:28] interesting pod/machine-config-daemon-p4t85 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 05 23:43:10 crc kubenswrapper[4910]: I0105 23:43:10.952216 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 05 23:43:10 crc kubenswrapper[4910]: I0105 23:43:10.952253 4910 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" Jan 05 23:43:10 crc kubenswrapper[4910]: I0105 23:43:10.953056 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8"} pod="openshift-machine-config-operator/machine-config-daemon-p4t85" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 05 23:43:10 crc kubenswrapper[4910]: I0105 23:43:10.953109 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerName="machine-config-daemon" containerID="cri-o://c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" gracePeriod=600 Jan 05 23:43:11 crc kubenswrapper[4910]: E0105 23:43:11.071722 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:43:11 crc kubenswrapper[4910]: I0105 23:43:11.920154 4910 generic.go:334] "Generic (PLEG): container finished" podID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" exitCode=0 Jan 05 23:43:11 crc kubenswrapper[4910]: I0105 23:43:11.920349 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerDied","Data":"c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8"} Jan 05 23:43:11 crc kubenswrapper[4910]: I0105 23:43:11.920808 4910 scope.go:117] "RemoveContainer" containerID="85efc8de819de3d45b60cf4b26ac6a5b91b06bbd1c65b576dff5063a93cada55" Jan 05 23:43:11 crc kubenswrapper[4910]: I0105 23:43:11.923540 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:43:11 crc kubenswrapper[4910]: E0105 23:43:11.924097 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:43:14 crc kubenswrapper[4910]: I0105 23:43:14.950162 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b_6a5f9668-f09e-4e0e-a0df-82f08d28bb9b/util/0.log" Jan 05 23:43:15 crc kubenswrapper[4910]: I0105 23:43:15.214357 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b_6a5f9668-f09e-4e0e-a0df-82f08d28bb9b/pull/0.log" Jan 05 23:43:15 crc kubenswrapper[4910]: I0105 23:43:15.228310 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b_6a5f9668-f09e-4e0e-a0df-82f08d28bb9b/pull/0.log" Jan 05 23:43:15 crc kubenswrapper[4910]: I0105 23:43:15.258868 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b_6a5f9668-f09e-4e0e-a0df-82f08d28bb9b/util/0.log" Jan 05 23:43:15 crc kubenswrapper[4910]: I0105 23:43:15.430515 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b_6a5f9668-f09e-4e0e-a0df-82f08d28bb9b/extract/0.log" Jan 05 23:43:15 crc kubenswrapper[4910]: I0105 23:43:15.455259 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b_6a5f9668-f09e-4e0e-a0df-82f08d28bb9b/pull/0.log" Jan 05 23:43:15 crc kubenswrapper[4910]: I0105 23:43:15.489859 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931arh27b_6a5f9668-f09e-4e0e-a0df-82f08d28bb9b/util/0.log" Jan 05 23:43:15 crc kubenswrapper[4910]: I0105 23:43:15.632774 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx_14994d1c-8f9e-4eab-b9fe-994f9910317b/util/0.log" Jan 05 23:43:15 crc kubenswrapper[4910]: I0105 23:43:15.871559 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx_14994d1c-8f9e-4eab-b9fe-994f9910317b/util/0.log" Jan 05 23:43:15 crc kubenswrapper[4910]: I0105 23:43:15.874105 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx_14994d1c-8f9e-4eab-b9fe-994f9910317b/pull/0.log" Jan 05 23:43:15 crc kubenswrapper[4910]: I0105 23:43:15.889082 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx_14994d1c-8f9e-4eab-b9fe-994f9910317b/pull/0.log" Jan 05 23:43:16 crc kubenswrapper[4910]: I0105 23:43:16.406124 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx_14994d1c-8f9e-4eab-b9fe-994f9910317b/util/0.log" Jan 05 23:43:16 crc kubenswrapper[4910]: I0105 23:43:16.423709 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx_14994d1c-8f9e-4eab-b9fe-994f9910317b/extract/0.log" Jan 05 23:43:16 crc kubenswrapper[4910]: I0105 23:43:16.430310 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d49gshx_14994d1c-8f9e-4eab-b9fe-994f9910317b/pull/0.log" Jan 05 23:43:16 crc kubenswrapper[4910]: I0105 23:43:16.596176 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_8c3f7294-a422-47b1-a323-82a8ac718bdc/util/0.log" Jan 05 23:43:16 crc kubenswrapper[4910]: I0105 23:43:16.817698 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_8c3f7294-a422-47b1-a323-82a8ac718bdc/pull/0.log" Jan 05 23:43:16 crc kubenswrapper[4910]: I0105 23:43:16.823411 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_8c3f7294-a422-47b1-a323-82a8ac718bdc/util/0.log" Jan 05 23:43:16 crc kubenswrapper[4910]: I0105 23:43:16.901772 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_8c3f7294-a422-47b1-a323-82a8ac718bdc/pull/0.log" Jan 05 23:43:17 crc kubenswrapper[4910]: I0105 23:43:17.032873 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_8c3f7294-a422-47b1-a323-82a8ac718bdc/util/0.log" Jan 05 23:43:17 crc kubenswrapper[4910]: I0105 23:43:17.059818 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_8c3f7294-a422-47b1-a323-82a8ac718bdc/pull/0.log" Jan 05 23:43:17 crc kubenswrapper[4910]: I0105 23:43:17.167372 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8mqxzx_8c3f7294-a422-47b1-a323-82a8ac718bdc/extract/0.log" Jan 05 23:43:17 crc kubenswrapper[4910]: I0105 23:43:17.221084 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx_ebca1bd5-9586-4341-9f53-ad40bf1827f0/util/0.log" Jan 05 23:43:17 crc kubenswrapper[4910]: I0105 23:43:17.391784 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx_ebca1bd5-9586-4341-9f53-ad40bf1827f0/pull/0.log" Jan 05 23:43:17 crc kubenswrapper[4910]: I0105 23:43:17.422571 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx_ebca1bd5-9586-4341-9f53-ad40bf1827f0/util/0.log" Jan 05 23:43:17 crc kubenswrapper[4910]: I0105 23:43:17.488334 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx_ebca1bd5-9586-4341-9f53-ad40bf1827f0/pull/0.log" Jan 05 23:43:17 crc kubenswrapper[4910]: I0105 23:43:17.667426 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx_ebca1bd5-9586-4341-9f53-ad40bf1827f0/pull/0.log" Jan 05 23:43:17 crc kubenswrapper[4910]: I0105 23:43:17.676641 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx_ebca1bd5-9586-4341-9f53-ad40bf1827f0/extract/0.log" Jan 05 23:43:17 crc kubenswrapper[4910]: I0105 23:43:17.693192 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08p9shx_ebca1bd5-9586-4341-9f53-ad40bf1827f0/util/0.log" Jan 05 23:43:17 crc kubenswrapper[4910]: I0105 23:43:17.868297 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-gj6th_36f587d4-ab14-4c64-9fe6-fd09211dd62c/extract-utilities/0.log" Jan 05 
23:43:18 crc kubenswrapper[4910]: I0105 23:43:18.016918 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-gj6th_36f587d4-ab14-4c64-9fe6-fd09211dd62c/extract-content/0.log" Jan 05 23:43:18 crc kubenswrapper[4910]: I0105 23:43:18.084561 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-gj6th_36f587d4-ab14-4c64-9fe6-fd09211dd62c/extract-content/0.log" Jan 05 23:43:18 crc kubenswrapper[4910]: I0105 23:43:18.087523 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-gj6th_36f587d4-ab14-4c64-9fe6-fd09211dd62c/extract-utilities/0.log" Jan 05 23:43:18 crc kubenswrapper[4910]: I0105 23:43:18.222692 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-gj6th_36f587d4-ab14-4c64-9fe6-fd09211dd62c/extract-utilities/0.log" Jan 05 23:43:18 crc kubenswrapper[4910]: I0105 23:43:18.242773 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-gj6th_36f587d4-ab14-4c64-9fe6-fd09211dd62c/extract-content/0.log" Jan 05 23:43:18 crc kubenswrapper[4910]: I0105 23:43:18.440713 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-89z5z_9196e611-5468-4663-97c6-d50a40771bb4/extract-utilities/0.log" Jan 05 23:43:18 crc kubenswrapper[4910]: I0105 23:43:18.678791 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-89z5z_9196e611-5468-4663-97c6-d50a40771bb4/extract-utilities/0.log" Jan 05 23:43:18 crc kubenswrapper[4910]: I0105 23:43:18.707402 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-89z5z_9196e611-5468-4663-97c6-d50a40771bb4/extract-content/0.log" Jan 05 23:43:18 crc kubenswrapper[4910]: I0105 23:43:18.734360 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-89z5z_9196e611-5468-4663-97c6-d50a40771bb4/extract-content/0.log" Jan 05 23:43:18 crc kubenswrapper[4910]: I0105 23:43:18.920887 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-89z5z_9196e611-5468-4663-97c6-d50a40771bb4/extract-utilities/0.log" Jan 05 23:43:19 crc kubenswrapper[4910]: I0105 23:43:19.014923 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-89z5z_9196e611-5468-4663-97c6-d50a40771bb4/extract-content/0.log" Jan 05 23:43:19 crc kubenswrapper[4910]: I0105 23:43:19.141757 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-zq4vl_9e1a2196-6cd9-49e7-88b2-4e886ce030b4/marketplace-operator/0.log" Jan 05 23:43:19 crc kubenswrapper[4910]: I0105 23:43:19.142934 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-gj6th_36f587d4-ab14-4c64-9fe6-fd09211dd62c/registry-server/0.log" Jan 05 23:43:19 crc kubenswrapper[4910]: I0105 23:43:19.315842 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-swkg7_4145136a-da1b-4e59-bd3a-b7565fc66443/extract-utilities/0.log" Jan 05 23:43:19 crc kubenswrapper[4910]: I0105 23:43:19.490957 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-swkg7_4145136a-da1b-4e59-bd3a-b7565fc66443/extract-content/0.log" Jan 05 
23:43:19 crc kubenswrapper[4910]: I0105 23:43:19.552653 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-swkg7_4145136a-da1b-4e59-bd3a-b7565fc66443/extract-content/0.log" Jan 05 23:43:19 crc kubenswrapper[4910]: I0105 23:43:19.556345 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-swkg7_4145136a-da1b-4e59-bd3a-b7565fc66443/extract-utilities/0.log" Jan 05 23:43:19 crc kubenswrapper[4910]: I0105 23:43:19.768754 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-swkg7_4145136a-da1b-4e59-bd3a-b7565fc66443/extract-utilities/0.log" Jan 05 23:43:19 crc kubenswrapper[4910]: I0105 23:43:19.786253 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-swkg7_4145136a-da1b-4e59-bd3a-b7565fc66443/extract-content/0.log" Jan 05 23:43:19 crc kubenswrapper[4910]: I0105 23:43:19.940004 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-89z5z_9196e611-5468-4663-97c6-d50a40771bb4/registry-server/0.log" Jan 05 23:43:19 crc kubenswrapper[4910]: I0105 23:43:19.967111 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lc2hp_61bfc4cb-601d-4bbc-8820-59f6f8de1c63/extract-utilities/0.log" Jan 05 23:43:20 crc kubenswrapper[4910]: I0105 23:43:20.085817 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-swkg7_4145136a-da1b-4e59-bd3a-b7565fc66443/registry-server/0.log" Jan 05 23:43:20 crc kubenswrapper[4910]: I0105 23:43:20.190566 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lc2hp_61bfc4cb-601d-4bbc-8820-59f6f8de1c63/extract-content/0.log" Jan 05 23:43:20 crc kubenswrapper[4910]: I0105 23:43:20.208177 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lc2hp_61bfc4cb-601d-4bbc-8820-59f6f8de1c63/extract-utilities/0.log" Jan 05 23:43:20 crc kubenswrapper[4910]: I0105 23:43:20.237211 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lc2hp_61bfc4cb-601d-4bbc-8820-59f6f8de1c63/extract-content/0.log" Jan 05 23:43:20 crc kubenswrapper[4910]: I0105 23:43:20.353055 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lc2hp_61bfc4cb-601d-4bbc-8820-59f6f8de1c63/extract-utilities/0.log" Jan 05 23:43:20 crc kubenswrapper[4910]: I0105 23:43:20.369965 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lc2hp_61bfc4cb-601d-4bbc-8820-59f6f8de1c63/extract-content/0.log" Jan 05 23:43:21 crc kubenswrapper[4910]: I0105 23:43:21.176809 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lc2hp_61bfc4cb-601d-4bbc-8820-59f6f8de1c63/registry-server/0.log" Jan 05 23:43:26 crc kubenswrapper[4910]: I0105 23:43:26.721875 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:43:26 crc kubenswrapper[4910]: E0105 23:43:26.722702 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:43:35 crc kubenswrapper[4910]: I0105 23:43:35.296765 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-hb5xs_9a834e51-2fcd-4e02-ab87-560d50993337/prometheus-operator/0.log" Jan 05 23:43:35 crc kubenswrapper[4910]: I0105 23:43:35.475152 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-77fc4ff47-l5mlw_dd44f3c6-e484-4843-8ce1-3c771202ebf0/prometheus-operator-admission-webhook/0.log" Jan 05 23:43:35 crc kubenswrapper[4910]: I0105 23:43:35.575911 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-77fc4ff47-p2hgp_d4f09a8f-4fb6-4720-8fb9-fb2f480e9384/prometheus-operator-admission-webhook/0.log" Jan 05 23:43:35 crc kubenswrapper[4910]: I0105 23:43:35.647995 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-7ndlc_fd9aa314-35df-4c05-92fd-aa1127e8e80a/operator/0.log" Jan 05 23:43:35 crc kubenswrapper[4910]: I0105 23:43:35.781433 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-xf7mz_d6b4354f-e670-44e7-a48e-4252a4ac68a6/perses-operator/0.log" Jan 05 23:43:39 crc kubenswrapper[4910]: I0105 23:43:39.722409 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:43:39 crc kubenswrapper[4910]: E0105 23:43:39.723906 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:43:50 crc kubenswrapper[4910]: I0105 23:43:50.722665 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:43:50 crc kubenswrapper[4910]: E0105 23:43:50.723914 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:43:52 crc kubenswrapper[4910]: I0105 23:43:52.259041 4910 scope.go:117] "RemoveContainer" containerID="d98e24434f203249285386fb775c8a0b61c19518fffb2cd9c4906a140a466cf3" Jan 05 23:44:01 crc kubenswrapper[4910]: I0105 23:44:01.722317 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:44:01 crc kubenswrapper[4910]: E0105 23:44:01.723035 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:44:13 crc kubenswrapper[4910]: I0105 23:44:13.724470 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:44:13 crc kubenswrapper[4910]: E0105 23:44:13.725235 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:44:28 crc kubenswrapper[4910]: I0105 23:44:28.732532 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:44:28 crc kubenswrapper[4910]: E0105 23:44:28.733476 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:44:40 crc kubenswrapper[4910]: I0105 23:44:40.722075 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:44:40 crc kubenswrapper[4910]: E0105 23:44:40.723267 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:44:54 crc kubenswrapper[4910]: I0105 23:44:54.722874 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:44:54 crc kubenswrapper[4910]: E0105 23:44:54.724167 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.185304 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp"] Jan 05 23:45:00 crc kubenswrapper[4910]: E0105 23:45:00.186649 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f8155b1-31fe-486a-856f-d87ad0e0e2a1" containerName="extract-content" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.186673 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f8155b1-31fe-486a-856f-d87ad0e0e2a1" containerName="extract-content" Jan 05 23:45:00 crc kubenswrapper[4910]: E0105 
23:45:00.186710 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f8155b1-31fe-486a-856f-d87ad0e0e2a1" containerName="extract-utilities" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.186724 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f8155b1-31fe-486a-856f-d87ad0e0e2a1" containerName="extract-utilities" Jan 05 23:45:00 crc kubenswrapper[4910]: E0105 23:45:00.186781 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f8155b1-31fe-486a-856f-d87ad0e0e2a1" containerName="registry-server" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.186794 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f8155b1-31fe-486a-856f-d87ad0e0e2a1" containerName="registry-server" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.187214 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f8155b1-31fe-486a-856f-d87ad0e0e2a1" containerName="registry-server" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.188559 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.192442 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.199694 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.216997 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp"] Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.286243 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48984436-beba-405e-af6c-15960f9534db-secret-volume\") pod \"collect-profiles-29460945-vwqnp\" (UID: \"48984436-beba-405e-af6c-15960f9534db\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.286419 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48984436-beba-405e-af6c-15960f9534db-config-volume\") pod \"collect-profiles-29460945-vwqnp\" (UID: \"48984436-beba-405e-af6c-15960f9534db\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.286763 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7q7c\" (UniqueName: \"kubernetes.io/projected/48984436-beba-405e-af6c-15960f9534db-kube-api-access-n7q7c\") pod \"collect-profiles-29460945-vwqnp\" (UID: \"48984436-beba-405e-af6c-15960f9534db\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.388985 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7q7c\" (UniqueName: \"kubernetes.io/projected/48984436-beba-405e-af6c-15960f9534db-kube-api-access-n7q7c\") pod \"collect-profiles-29460945-vwqnp\" (UID: \"48984436-beba-405e-af6c-15960f9534db\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" Jan 05 23:45:00 crc 
kubenswrapper[4910]: I0105 23:45:00.389231 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48984436-beba-405e-af6c-15960f9534db-secret-volume\") pod \"collect-profiles-29460945-vwqnp\" (UID: \"48984436-beba-405e-af6c-15960f9534db\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.389275 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48984436-beba-405e-af6c-15960f9534db-config-volume\") pod \"collect-profiles-29460945-vwqnp\" (UID: \"48984436-beba-405e-af6c-15960f9534db\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.390372 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48984436-beba-405e-af6c-15960f9534db-config-volume\") pod \"collect-profiles-29460945-vwqnp\" (UID: \"48984436-beba-405e-af6c-15960f9534db\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.407843 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48984436-beba-405e-af6c-15960f9534db-secret-volume\") pod \"collect-profiles-29460945-vwqnp\" (UID: \"48984436-beba-405e-af6c-15960f9534db\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.431315 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7q7c\" (UniqueName: \"kubernetes.io/projected/48984436-beba-405e-af6c-15960f9534db-kube-api-access-n7q7c\") pod \"collect-profiles-29460945-vwqnp\" (UID: \"48984436-beba-405e-af6c-15960f9534db\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.519307 4910 util.go:30] "No sandbox for pod can be found. 
Jan 05 23:45:00 crc kubenswrapper[4910]: I0105 23:45:00.519307 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp"
Jan 05 23:45:01 crc kubenswrapper[4910]: I0105 23:45:01.072345 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp"]
Jan 05 23:45:01 crc kubenswrapper[4910]: W0105 23:45:01.083773 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48984436_beba_405e_af6c_15960f9534db.slice/crio-043dcaf9c5613f25d53a84079d2e546c2417510f128d6aaa113fb7421a7892cb WatchSource:0}: Error finding container 043dcaf9c5613f25d53a84079d2e546c2417510f128d6aaa113fb7421a7892cb: Status 404 returned error can't find the container with id 043dcaf9c5613f25d53a84079d2e546c2417510f128d6aaa113fb7421a7892cb
Jan 05 23:45:01 crc kubenswrapper[4910]: I0105 23:45:01.285352 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" event={"ID":"48984436-beba-405e-af6c-15960f9534db","Type":"ContainerStarted","Data":"abe3eb12b97a2f0d5bbc2d123a99b6d39f08b6b18c6a7fa8f728d0621bbd40fb"}
Jan 05 23:45:01 crc kubenswrapper[4910]: I0105 23:45:01.285445 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" event={"ID":"48984436-beba-405e-af6c-15960f9534db","Type":"ContainerStarted","Data":"043dcaf9c5613f25d53a84079d2e546c2417510f128d6aaa113fb7421a7892cb"}
Jan 05 23:45:01 crc kubenswrapper[4910]: I0105 23:45:01.302679 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" podStartSLOduration=1.302654266 podStartE2EDuration="1.302654266s" podCreationTimestamp="2026-01-05 23:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-05 23:45:01.301366345 +0000 UTC m=+6832.878864035" watchObservedRunningTime="2026-01-05 23:45:01.302654266 +0000 UTC m=+6832.880151976"
Jan 05 23:45:01 crc kubenswrapper[4910]: E0105 23:45:01.820260 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48984436_beba_405e_af6c_15960f9534db.slice/crio-conmon-abe3eb12b97a2f0d5bbc2d123a99b6d39f08b6b18c6a7fa8f728d0621bbd40fb.scope\": RecentStats: unable to find data in memory cache]"
Jan 05 23:45:02 crc kubenswrapper[4910]: I0105 23:45:02.295443 4910 generic.go:334] "Generic (PLEG): container finished" podID="48984436-beba-405e-af6c-15960f9534db" containerID="abe3eb12b97a2f0d5bbc2d123a99b6d39f08b6b18c6a7fa8f728d0621bbd40fb" exitCode=0
Jan 05 23:45:02 crc kubenswrapper[4910]: I0105 23:45:02.296163 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" event={"ID":"48984436-beba-405e-af6c-15960f9534db","Type":"ContainerDied","Data":"abe3eb12b97a2f0d5bbc2d123a99b6d39f08b6b18c6a7fa8f728d0621bbd40fb"}
Jan 05 23:45:03 crc kubenswrapper[4910]: I0105 23:45:03.066437 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-f99t2"]
Jan 05 23:45:03 crc kubenswrapper[4910]: I0105 23:45:03.079967 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-5735-account-create-update-h89vj"]
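Annotation: the pod_startup_latency_tracker line above records how long the pod took from creation to running (here podStartE2EDuration="1.302654266s"; the m=+... values are monotonic-clock offsets since kubelet start). A sketch (field names taken from the line above, otherwise an assumption) that flags slow pod starts from a log on stdin:

// slodur.go — extract podStartE2EDuration from kubelet startup-latency
// lines and report pods over a threshold. A log-reading sketch only.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"time"
)

func main() {
	re := regexp.MustCompile(`Observed pod startup duration" pod="([^"]+)".*podStartE2EDuration="([^"]+)"`)
	threshold := 10 * time.Second // arbitrary threshold for illustration
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
	for sc.Scan() {
		m := re.FindStringSubmatch(sc.Text())
		if m == nil {
			continue
		}
		d, err := time.ParseDuration(m[2]) // e.g. "1.302654266s"
		if err != nil {
			continue
		}
		if d > threshold {
			fmt.Printf("slow start: %s took %v\n", m[1], d)
		}
	}
}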
"SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-f99t2"] Jan 05 23:45:03 crc kubenswrapper[4910]: I0105 23:45:03.108824 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-5735-account-create-update-h89vj"] Jan 05 23:45:03 crc kubenswrapper[4910]: I0105 23:45:03.716488 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" Jan 05 23:45:03 crc kubenswrapper[4910]: I0105 23:45:03.875696 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48984436-beba-405e-af6c-15960f9534db-secret-volume\") pod \"48984436-beba-405e-af6c-15960f9534db\" (UID: \"48984436-beba-405e-af6c-15960f9534db\") " Jan 05 23:45:03 crc kubenswrapper[4910]: I0105 23:45:03.875816 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7q7c\" (UniqueName: \"kubernetes.io/projected/48984436-beba-405e-af6c-15960f9534db-kube-api-access-n7q7c\") pod \"48984436-beba-405e-af6c-15960f9534db\" (UID: \"48984436-beba-405e-af6c-15960f9534db\") " Jan 05 23:45:03 crc kubenswrapper[4910]: I0105 23:45:03.876156 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48984436-beba-405e-af6c-15960f9534db-config-volume\") pod \"48984436-beba-405e-af6c-15960f9534db\" (UID: \"48984436-beba-405e-af6c-15960f9534db\") " Jan 05 23:45:03 crc kubenswrapper[4910]: I0105 23:45:03.877595 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48984436-beba-405e-af6c-15960f9534db-config-volume" (OuterVolumeSpecName: "config-volume") pod "48984436-beba-405e-af6c-15960f9534db" (UID: "48984436-beba-405e-af6c-15960f9534db"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 05 23:45:03 crc kubenswrapper[4910]: I0105 23:45:03.885816 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48984436-beba-405e-af6c-15960f9534db-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "48984436-beba-405e-af6c-15960f9534db" (UID: "48984436-beba-405e-af6c-15960f9534db"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 05 23:45:03 crc kubenswrapper[4910]: I0105 23:45:03.893042 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48984436-beba-405e-af6c-15960f9534db-kube-api-access-n7q7c" (OuterVolumeSpecName: "kube-api-access-n7q7c") pod "48984436-beba-405e-af6c-15960f9534db" (UID: "48984436-beba-405e-af6c-15960f9534db"). InnerVolumeSpecName "kube-api-access-n7q7c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 05 23:45:03 crc kubenswrapper[4910]: I0105 23:45:03.979993 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48984436-beba-405e-af6c-15960f9534db-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 05 23:45:03 crc kubenswrapper[4910]: I0105 23:45:03.980048 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7q7c\" (UniqueName: \"kubernetes.io/projected/48984436-beba-405e-af6c-15960f9534db-kube-api-access-n7q7c\") on node \"crc\" DevicePath \"\"" Jan 05 23:45:03 crc kubenswrapper[4910]: I0105 23:45:03.980070 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48984436-beba-405e-af6c-15960f9534db-config-volume\") on node \"crc\" DevicePath \"\"" Jan 05 23:45:04 crc kubenswrapper[4910]: I0105 23:45:04.321292 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" event={"ID":"48984436-beba-405e-af6c-15960f9534db","Type":"ContainerDied","Data":"043dcaf9c5613f25d53a84079d2e546c2417510f128d6aaa113fb7421a7892cb"} Jan 05 23:45:04 crc kubenswrapper[4910]: I0105 23:45:04.321560 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="043dcaf9c5613f25d53a84079d2e546c2417510f128d6aaa113fb7421a7892cb" Jan 05 23:45:04 crc kubenswrapper[4910]: I0105 23:45:04.321407 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29460945-vwqnp" Jan 05 23:45:04 crc kubenswrapper[4910]: I0105 23:45:04.410223 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54"] Jan 05 23:45:04 crc kubenswrapper[4910]: I0105 23:45:04.414436 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29460900-fhd54"] Jan 05 23:45:04 crc kubenswrapper[4910]: I0105 23:45:04.733878 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8" path="/var/lib/kubelet/pods/36c7e70d-6b03-4cf9-9ef8-a1afa8af62c8/volumes" Jan 05 23:45:04 crc kubenswrapper[4910]: I0105 23:45:04.734684 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79b2932f-d96b-45db-bea6-e821af5a8388" path="/var/lib/kubelet/pods/79b2932f-d96b-45db-bea6-e821af5a8388/volumes" Jan 05 23:45:04 crc kubenswrapper[4910]: I0105 23:45:04.735575 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a43b047b-40b6-4c3d-aac2-eb352229d2c2" path="/var/lib/kubelet/pods/a43b047b-40b6-4c3d-aac2-eb352229d2c2/volumes" Jan 05 23:45:07 crc kubenswrapper[4910]: I0105 23:45:07.722456 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:45:07 crc kubenswrapper[4910]: E0105 23:45:07.723574 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:45:15 crc kubenswrapper[4910]: I0105 23:45:15.076967 4910 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/aodh-db-sync-2w8sc"] Jan 05 23:45:15 crc kubenswrapper[4910]: I0105 23:45:15.116521 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-2w8sc"] Jan 05 23:45:16 crc kubenswrapper[4910]: I0105 23:45:16.739229 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2ff657b-9e47-472a-9ff1-eda124dd4db8" path="/var/lib/kubelet/pods/b2ff657b-9e47-472a-9ff1-eda124dd4db8/volumes" Jan 05 23:45:17 crc kubenswrapper[4910]: I0105 23:45:17.029621 4910 generic.go:334] "Generic (PLEG): container finished" podID="3703c96e-c6c7-4742-932f-9943b276b9d2" containerID="20b06b0cc7eb7acb35fe07962414914ec1f233139a9836f96132f41c9cf1b72f" exitCode=0 Jan 05 23:45:17 crc kubenswrapper[4910]: I0105 23:45:17.029704 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-566mg/must-gather-5tnhd" event={"ID":"3703c96e-c6c7-4742-932f-9943b276b9d2","Type":"ContainerDied","Data":"20b06b0cc7eb7acb35fe07962414914ec1f233139a9836f96132f41c9cf1b72f"} Jan 05 23:45:17 crc kubenswrapper[4910]: I0105 23:45:17.030921 4910 scope.go:117] "RemoveContainer" containerID="20b06b0cc7eb7acb35fe07962414914ec1f233139a9836f96132f41c9cf1b72f" Jan 05 23:45:17 crc kubenswrapper[4910]: I0105 23:45:17.183896 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-566mg_must-gather-5tnhd_3703c96e-c6c7-4742-932f-9943b276b9d2/gather/0.log" Jan 05 23:45:19 crc kubenswrapper[4910]: I0105 23:45:19.722537 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:45:19 crc kubenswrapper[4910]: E0105 23:45:19.723366 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:45:24 crc kubenswrapper[4910]: I0105 23:45:24.983159 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-566mg/must-gather-5tnhd"] Jan 05 23:45:24 crc kubenswrapper[4910]: I0105 23:45:24.983935 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-566mg/must-gather-5tnhd" podUID="3703c96e-c6c7-4742-932f-9943b276b9d2" containerName="copy" containerID="cri-o://25439afa7f57a4751d807be1e94cba4de39d94a1fd757b1cfa13fb42ab361431" gracePeriod=2 Jan 05 23:45:24 crc kubenswrapper[4910]: I0105 23:45:24.994952 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-566mg/must-gather-5tnhd"] Jan 05 23:45:25 crc kubenswrapper[4910]: I0105 23:45:25.140400 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-566mg_must-gather-5tnhd_3703c96e-c6c7-4742-932f-9943b276b9d2/copy/0.log" Jan 05 23:45:25 crc kubenswrapper[4910]: I0105 23:45:25.140991 4910 generic.go:334] "Generic (PLEG): container finished" podID="3703c96e-c6c7-4742-932f-9943b276b9d2" containerID="25439afa7f57a4751d807be1e94cba4de39d94a1fd757b1cfa13fb42ab361431" exitCode=143 Jan 05 23:45:25 crc kubenswrapper[4910]: I0105 23:45:25.549037 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-566mg_must-gather-5tnhd_3703c96e-c6c7-4742-932f-9943b276b9d2/copy/0.log" Jan 05 23:45:25 crc 
Jan 05 23:45:25 crc kubenswrapper[4910]: I0105 23:45:25.549859 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-566mg/must-gather-5tnhd"
Jan 05 23:45:25 crc kubenswrapper[4910]: I0105 23:45:25.694486 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdqhc\" (UniqueName: \"kubernetes.io/projected/3703c96e-c6c7-4742-932f-9943b276b9d2-kube-api-access-sdqhc\") pod \"3703c96e-c6c7-4742-932f-9943b276b9d2\" (UID: \"3703c96e-c6c7-4742-932f-9943b276b9d2\") "
Jan 05 23:45:25 crc kubenswrapper[4910]: I0105 23:45:25.694620 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3703c96e-c6c7-4742-932f-9943b276b9d2-must-gather-output\") pod \"3703c96e-c6c7-4742-932f-9943b276b9d2\" (UID: \"3703c96e-c6c7-4742-932f-9943b276b9d2\") "
Jan 05 23:45:25 crc kubenswrapper[4910]: I0105 23:45:25.700970 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3703c96e-c6c7-4742-932f-9943b276b9d2-kube-api-access-sdqhc" (OuterVolumeSpecName: "kube-api-access-sdqhc") pod "3703c96e-c6c7-4742-932f-9943b276b9d2" (UID: "3703c96e-c6c7-4742-932f-9943b276b9d2"). InnerVolumeSpecName "kube-api-access-sdqhc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 05 23:45:25 crc kubenswrapper[4910]: I0105 23:45:25.798609 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdqhc\" (UniqueName: \"kubernetes.io/projected/3703c96e-c6c7-4742-932f-9943b276b9d2-kube-api-access-sdqhc\") on node \"crc\" DevicePath \"\""
Jan 05 23:45:25 crc kubenswrapper[4910]: I0105 23:45:25.845364 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3703c96e-c6c7-4742-932f-9943b276b9d2-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "3703c96e-c6c7-4742-932f-9943b276b9d2" (UID: "3703c96e-c6c7-4742-932f-9943b276b9d2"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 05 23:45:25 crc kubenswrapper[4910]: I0105 23:45:25.903271 4910 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3703c96e-c6c7-4742-932f-9943b276b9d2-must-gather-output\") on node \"crc\" DevicePath \"\""
Jan 05 23:45:26 crc kubenswrapper[4910]: I0105 23:45:26.158958 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-566mg_must-gather-5tnhd_3703c96e-c6c7-4742-932f-9943b276b9d2/copy/0.log"
Jan 05 23:45:26 crc kubenswrapper[4910]: I0105 23:45:26.159551 4910 scope.go:117] "RemoveContainer" containerID="25439afa7f57a4751d807be1e94cba4de39d94a1fd757b1cfa13fb42ab361431"
Jan 05 23:45:26 crc kubenswrapper[4910]: I0105 23:45:26.159686 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-566mg/must-gather-5tnhd"
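Annotation: the exitCode=143 reported above for the must-gather copy container is the usual shell/runtime convention of 128+N for death by signal N, so 143 = 128+15 (SIGTERM), consistent with the "Killing container with a grace period" line that preceded it; 0 is a normal exit. A small decoding sketch (the convention is standard, the sample codes are illustrative):

// exitcode.go — decode container exit codes per the 128+signal
// convention seen in these PLEG lines.
package main

import (
	"fmt"
	"syscall"
)

func describe(code int) string {
	switch {
	case code == 0:
		return "exited normally"
	case code > 128:
		sig := syscall.Signal(code - 128)
		return fmt.Sprintf("killed by signal %d (%v)", code-128, sig)
	default:
		return fmt.Sprintf("exited with error status %d", code)
	}
}

func main() {
	for _, c := range []int{0, 143, 137} { // 137 = 128+9 (SIGKILL)
		fmt.Printf("exitCode=%d: %s\n", c, describe(c))
	}
}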
Need to start a new one" pod="openshift-must-gather-566mg/must-gather-5tnhd" Jan 05 23:45:26 crc kubenswrapper[4910]: I0105 23:45:26.199647 4910 scope.go:117] "RemoveContainer" containerID="20b06b0cc7eb7acb35fe07962414914ec1f233139a9836f96132f41c9cf1b72f" Jan 05 23:45:26 crc kubenswrapper[4910]: I0105 23:45:26.739457 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3703c96e-c6c7-4742-932f-9943b276b9d2" path="/var/lib/kubelet/pods/3703c96e-c6c7-4742-932f-9943b276b9d2/volumes" Jan 05 23:45:33 crc kubenswrapper[4910]: I0105 23:45:33.721555 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:45:33 crc kubenswrapper[4910]: E0105 23:45:33.722337 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:45:35 crc kubenswrapper[4910]: I0105 23:45:35.039266 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-n4xcf"] Jan 05 23:45:35 crc kubenswrapper[4910]: I0105 23:45:35.057316 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-n4xcf"] Jan 05 23:45:36 crc kubenswrapper[4910]: I0105 23:45:36.034994 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-1129-account-create-update-qbxlb"] Jan 05 23:45:36 crc kubenswrapper[4910]: I0105 23:45:36.044622 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-1129-account-create-update-qbxlb"] Jan 05 23:45:36 crc kubenswrapper[4910]: I0105 23:45:36.736609 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1" path="/var/lib/kubelet/pods/93c55ed0-19ba-4e51-9e1f-365d6bfc9ea1/volumes" Jan 05 23:45:36 crc kubenswrapper[4910]: I0105 23:45:36.737749 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2c2edf2-0678-437b-aa0a-1b5448266d93" path="/var/lib/kubelet/pods/b2c2edf2-0678-437b-aa0a-1b5448266d93/volumes" Jan 05 23:45:47 crc kubenswrapper[4910]: I0105 23:45:47.073905 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-dcfxt"] Jan 05 23:45:47 crc kubenswrapper[4910]: I0105 23:45:47.089219 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-dcfxt"] Jan 05 23:45:47 crc kubenswrapper[4910]: I0105 23:45:47.722934 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:45:47 crc kubenswrapper[4910]: E0105 23:45:47.723900 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:45:48 crc kubenswrapper[4910]: I0105 23:45:48.769761 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e41b44c-e6c9-473f-870e-52fc55ef73ff" 
path="/var/lib/kubelet/pods/1e41b44c-e6c9-473f-870e-52fc55ef73ff/volumes" Jan 05 23:45:52 crc kubenswrapper[4910]: I0105 23:45:52.434821 4910 scope.go:117] "RemoveContainer" containerID="cb864465201e2009fa8c3174c8883e161ca5c4de613f1b0ae6e9d483467c3167" Jan 05 23:45:52 crc kubenswrapper[4910]: I0105 23:45:52.461053 4910 scope.go:117] "RemoveContainer" containerID="43d4d33eb21111d88b658abbd42893a7e3e86c9ae6f69d028289560f18a8e5b8" Jan 05 23:45:52 crc kubenswrapper[4910]: I0105 23:45:52.538559 4910 scope.go:117] "RemoveContainer" containerID="c1f36164834643d3fe8de7801f0e60d65e3721196177978c65664f8ca20969f7" Jan 05 23:45:52 crc kubenswrapper[4910]: I0105 23:45:52.585228 4910 scope.go:117] "RemoveContainer" containerID="b429ebe7e9a6cd4ee878dc5d7803620fe414fc2f451430fad629ac4f3937e289" Jan 05 23:45:52 crc kubenswrapper[4910]: I0105 23:45:52.637531 4910 scope.go:117] "RemoveContainer" containerID="e74b7dcd4b4780a90abe19f26974ed7a15f032529791505154b0e145f1d1da1a" Jan 05 23:45:52 crc kubenswrapper[4910]: I0105 23:45:52.664476 4910 scope.go:117] "RemoveContainer" containerID="ee51b91ed8b3f0c48535ba0eee3cf31112722e5a2b1ea3521a46688075f1c2d8" Jan 05 23:45:52 crc kubenswrapper[4910]: I0105 23:45:52.724661 4910 scope.go:117] "RemoveContainer" containerID="592bd69773bc19043084f89888fd6432d729cf8a489de2e5d99149db08a9fd59" Jan 05 23:45:59 crc kubenswrapper[4910]: I0105 23:45:59.722179 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:45:59 crc kubenswrapper[4910]: E0105 23:45:59.723111 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:46:10 crc kubenswrapper[4910]: I0105 23:46:10.722626 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:46:10 crc kubenswrapper[4910]: E0105 23:46:10.724579 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:46:21 crc kubenswrapper[4910]: I0105 23:46:21.722447 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:46:21 crc kubenswrapper[4910]: E0105 23:46:21.723572 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:46:34 crc kubenswrapper[4910]: I0105 23:46:34.721895 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:46:34 crc 
Jan 05 23:46:34 crc kubenswrapper[4910]: I0105 23:46:34.721895 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8"
Jan 05 23:46:34 crc kubenswrapper[4910]: E0105 23:46:34.722907 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 23:46:47 crc kubenswrapper[4910]: I0105 23:46:47.721945 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8"
Jan 05 23:46:47 crc kubenswrapper[4910]: E0105 23:46:47.723100 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 23:47:00 crc kubenswrapper[4910]: I0105 23:47:00.723149 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8"
Jan 05 23:47:00 crc kubenswrapper[4910]: E0105 23:47:00.724062 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 23:47:14 crc kubenswrapper[4910]: I0105 23:47:14.724334 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8"
Jan 05 23:47:14 crc kubenswrapper[4910]: E0105 23:47:14.725214 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 23:47:27 crc kubenswrapper[4910]: I0105 23:47:27.721871 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8"
Jan 05 23:47:27 crc kubenswrapper[4910]: E0105 23:47:27.722989 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
Jan 05 23:47:41 crc kubenswrapper[4910]: I0105 23:47:41.722438 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8"
Jan 05 23:47:41 crc kubenswrapper[4910]: E0105 23:47:41.723660 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918"
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:47:52 crc kubenswrapper[4910]: I0105 23:47:52.721492 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:47:52 crc kubenswrapper[4910]: E0105 23:47:52.722974 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:48:03 crc kubenswrapper[4910]: I0105 23:48:03.722308 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:48:03 crc kubenswrapper[4910]: E0105 23:48:03.723443 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-p4t85_openshift-machine-config-operator(1180e67b-86e7-4aa8-b84f-55e2a18a7918)\"" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" podUID="1180e67b-86e7-4aa8-b84f-55e2a18a7918" Jan 05 23:48:16 crc kubenswrapper[4910]: I0105 23:48:16.722101 4910 scope.go:117] "RemoveContainer" containerID="c8796ba4f20d5ffb2bfca13c70f774542d4b41084b1e10019bcdae211e0ff9e8" Jan 05 23:48:17 crc kubenswrapper[4910]: I0105 23:48:17.234432 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-p4t85" event={"ID":"1180e67b-86e7-4aa8-b84f-55e2a18a7918","Type":"ContainerStarted","Data":"1b6512acd90fc25ff07f430d995f70f435c1c9d00ebe2c9abb3c5edff04ed2b7"} Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.406154 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jpbcb"] Jan 05 23:48:43 crc kubenswrapper[4910]: E0105 23:48:43.407345 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48984436-beba-405e-af6c-15960f9534db" containerName="collect-profiles" Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.407364 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="48984436-beba-405e-af6c-15960f9534db" containerName="collect-profiles" Jan 05 23:48:43 crc kubenswrapper[4910]: E0105 23:48:43.407391 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3703c96e-c6c7-4742-932f-9943b276b9d2" containerName="gather" Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.407400 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3703c96e-c6c7-4742-932f-9943b276b9d2" containerName="gather" Jan 05 23:48:43 crc kubenswrapper[4910]: E0105 23:48:43.407420 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3703c96e-c6c7-4742-932f-9943b276b9d2" containerName="copy" Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.407429 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3703c96e-c6c7-4742-932f-9943b276b9d2" containerName="copy" Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.407723 4910 
Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.407723 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="48984436-beba-405e-af6c-15960f9534db" containerName="collect-profiles"
Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.407740 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3703c96e-c6c7-4742-932f-9943b276b9d2" containerName="gather"
Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.407773 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3703c96e-c6c7-4742-932f-9943b276b9d2" containerName="copy"
Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.410596 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jpbcb"
Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.421155 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jpbcb"]
Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.430597 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/684847d4-4312-4f43-a1c4-3bea622a95b2-utilities\") pod \"community-operators-jpbcb\" (UID: \"684847d4-4312-4f43-a1c4-3bea622a95b2\") " pod="openshift-marketplace/community-operators-jpbcb"
Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.430729 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/684847d4-4312-4f43-a1c4-3bea622a95b2-catalog-content\") pod \"community-operators-jpbcb\" (UID: \"684847d4-4312-4f43-a1c4-3bea622a95b2\") " pod="openshift-marketplace/community-operators-jpbcb"
Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.430754 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjtz9\" (UniqueName: \"kubernetes.io/projected/684847d4-4312-4f43-a1c4-3bea622a95b2-kube-api-access-cjtz9\") pod \"community-operators-jpbcb\" (UID: \"684847d4-4312-4f43-a1c4-3bea622a95b2\") " pod="openshift-marketplace/community-operators-jpbcb"
Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.532409 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/684847d4-4312-4f43-a1c4-3bea622a95b2-utilities\") pod \"community-operators-jpbcb\" (UID: \"684847d4-4312-4f43-a1c4-3bea622a95b2\") " pod="openshift-marketplace/community-operators-jpbcb"
Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.532602 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/684847d4-4312-4f43-a1c4-3bea622a95b2-catalog-content\") pod \"community-operators-jpbcb\" (UID: \"684847d4-4312-4f43-a1c4-3bea622a95b2\") " pod="openshift-marketplace/community-operators-jpbcb"
Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.532630 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjtz9\" (UniqueName: \"kubernetes.io/projected/684847d4-4312-4f43-a1c4-3bea622a95b2-kube-api-access-cjtz9\") pod \"community-operators-jpbcb\" (UID: \"684847d4-4312-4f43-a1c4-3bea622a95b2\") " pod="openshift-marketplace/community-operators-jpbcb"
Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.533775 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/684847d4-4312-4f43-a1c4-3bea622a95b2-utilities\") pod \"community-operators-jpbcb\" (UID: \"684847d4-4312-4f43-a1c4-3bea622a95b2\") " pod="openshift-marketplace/community-operators-jpbcb"
\"kubernetes.io/empty-dir/684847d4-4312-4f43-a1c4-3bea622a95b2-utilities\") pod \"community-operators-jpbcb\" (UID: \"684847d4-4312-4f43-a1c4-3bea622a95b2\") " pod="openshift-marketplace/community-operators-jpbcb" Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.534044 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/684847d4-4312-4f43-a1c4-3bea622a95b2-catalog-content\") pod \"community-operators-jpbcb\" (UID: \"684847d4-4312-4f43-a1c4-3bea622a95b2\") " pod="openshift-marketplace/community-operators-jpbcb" Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.561163 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjtz9\" (UniqueName: \"kubernetes.io/projected/684847d4-4312-4f43-a1c4-3bea622a95b2-kube-api-access-cjtz9\") pod \"community-operators-jpbcb\" (UID: \"684847d4-4312-4f43-a1c4-3bea622a95b2\") " pod="openshift-marketplace/community-operators-jpbcb" Jan 05 23:48:43 crc kubenswrapper[4910]: I0105 23:48:43.734445 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jpbcb" Jan 05 23:48:44 crc kubenswrapper[4910]: I0105 23:48:44.281312 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jpbcb"] Jan 05 23:48:44 crc kubenswrapper[4910]: W0105 23:48:44.289867 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod684847d4_4312_4f43_a1c4_3bea622a95b2.slice/crio-4feb244ed324b40051baee3f9ab02076c1692b96b3be8010c014155403fdf670 WatchSource:0}: Error finding container 4feb244ed324b40051baee3f9ab02076c1692b96b3be8010c014155403fdf670: Status 404 returned error can't find the container with id 4feb244ed324b40051baee3f9ab02076c1692b96b3be8010c014155403fdf670 Jan 05 23:48:44 crc kubenswrapper[4910]: I0105 23:48:44.569873 4910 generic.go:334] "Generic (PLEG): container finished" podID="684847d4-4312-4f43-a1c4-3bea622a95b2" containerID="7f2b2648997590899cf88a34b56e21195f115a5c9ede756ed2d8127495fcd483" exitCode=0 Jan 05 23:48:44 crc kubenswrapper[4910]: I0105 23:48:44.569946 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jpbcb" event={"ID":"684847d4-4312-4f43-a1c4-3bea622a95b2","Type":"ContainerDied","Data":"7f2b2648997590899cf88a34b56e21195f115a5c9ede756ed2d8127495fcd483"} Jan 05 23:48:44 crc kubenswrapper[4910]: I0105 23:48:44.570475 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jpbcb" event={"ID":"684847d4-4312-4f43-a1c4-3bea622a95b2","Type":"ContainerStarted","Data":"4feb244ed324b40051baee3f9ab02076c1692b96b3be8010c014155403fdf670"} Jan 05 23:48:44 crc kubenswrapper[4910]: I0105 23:48:44.572460 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 05 23:48:45 crc kubenswrapper[4910]: I0105 23:48:45.588435 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jpbcb" event={"ID":"684847d4-4312-4f43-a1c4-3bea622a95b2","Type":"ContainerStarted","Data":"fc0e02bd09d105f114e5beeb4e1816b8ffcb72085d0e14da24fe0d3a43b5bfa9"} Jan 05 23:48:46 crc kubenswrapper[4910]: I0105 23:48:46.607191 4910 generic.go:334] "Generic (PLEG): container finished" podID="684847d4-4312-4f43-a1c4-3bea622a95b2" containerID="fc0e02bd09d105f114e5beeb4e1816b8ffcb72085d0e14da24fe0d3a43b5bfa9" exitCode=0 
Jan 05 23:48:46 crc kubenswrapper[4910]: I0105 23:48:46.607191 4910 generic.go:334] "Generic (PLEG): container finished" podID="684847d4-4312-4f43-a1c4-3bea622a95b2" containerID="fc0e02bd09d105f114e5beeb4e1816b8ffcb72085d0e14da24fe0d3a43b5bfa9" exitCode=0
Jan 05 23:48:46 crc kubenswrapper[4910]: I0105 23:48:46.607287 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jpbcb" event={"ID":"684847d4-4312-4f43-a1c4-3bea622a95b2","Type":"ContainerDied","Data":"fc0e02bd09d105f114e5beeb4e1816b8ffcb72085d0e14da24fe0d3a43b5bfa9"}
Jan 05 23:48:47 crc kubenswrapper[4910]: I0105 23:48:47.623678 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jpbcb" event={"ID":"684847d4-4312-4f43-a1c4-3bea622a95b2","Type":"ContainerStarted","Data":"ae58adc93ae1e63e7932ad04c63561a3838b590bc7043cd7a4794cd819f1440b"}
Jan 05 23:48:47 crc kubenswrapper[4910]: I0105 23:48:47.651628 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jpbcb" podStartSLOduration=2.172580335 podStartE2EDuration="4.651612052s" podCreationTimestamp="2026-01-05 23:48:43 +0000 UTC" firstStartedPulling="2026-01-05 23:48:44.572170313 +0000 UTC m=+7056.149667993" lastFinishedPulling="2026-01-05 23:48:47.051202 +0000 UTC m=+7058.628699710" observedRunningTime="2026-01-05 23:48:47.650707209 +0000 UTC m=+7059.228204879" watchObservedRunningTime="2026-01-05 23:48:47.651612052 +0000 UTC m=+7059.229109722"
Jan 05 23:48:53 crc kubenswrapper[4910]: I0105 23:48:53.734775 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jpbcb"
Jan 05 23:48:53 crc kubenswrapper[4910]: I0105 23:48:53.735639 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jpbcb"
Jan 05 23:48:53 crc kubenswrapper[4910]: I0105 23:48:53.830013 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jpbcb"
Jan 05 23:48:54 crc kubenswrapper[4910]: I0105 23:48:54.826714 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jpbcb"
Jan 05 23:48:54 crc kubenswrapper[4910]: I0105 23:48:54.891573 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jpbcb"]
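Annotation: the probe lines above show the usual ordering: readiness is reported with an empty status while the startup probe has not yet succeeded, the startup probe goes unhealthy then started, and only then does readiness flip to ready. A toy simulation of that gating (probe results and counts are assumed for illustration; this is not kubelet's prober code):

// probesim.go — simulate the startup-probe gating seen above: readiness
// results are not acted on until the startup probe succeeds.
package main

import "fmt"

func main() {
	startupResults := []bool{false, true} // assumed: fail once, then pass
	readinessResults := []bool{true}      // assumed: pass on first attempt
	started := false
	for _, ok := range startupResults {
		if !ok {
			fmt.Println(`probe="startup" status="unhealthy"`)
			continue
		}
		started = true
		fmt.Println(`probe="startup" status="started"`)
		break
	}
	if started { // readiness only matters once the container has started
		for _, ok := range readinessResults {
			if ok {
				fmt.Println(`probe="readiness" status="ready"`)
			}
		}
	}
}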